Column          Type     Length range
hip_filename    string   5-84
hip_content     string   79-9.69M
cuda_filename   string   4-83
cuda_content    string   19-9.69M
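Each row below pairs a CUDA source file (cuda_content) with the HIP source that hipify generated from it (hip_content). As a minimal sketch of the translation pattern the rows exhibit, consider a small illustrative kernel; the scale kernel, its sizes, and its launch parameters are made up for this example and are not taken from the dataset:

// CUDA original: scale an array in place by a constant (illustrative only).
#include <cuda_runtime.h>

__global__ void scale(float *a, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) a[i] *= s;
}

int main(void) {
    const int n = 1024;
    float *d_a;
    cudaMalloc(&d_a, n * sizeof(float));
    scale<<<(n + 255) / 256, 256>>>(d_a, 2.0f, n);
    cudaDeviceSynchronize();
    cudaFree(d_a);
    return 0;
}

Hipify renames the runtime API calls (cudaMalloc to hipMalloc, cudaDeviceSynchronize to hipDeviceSynchronize, and so on), prepends the hip_runtime header plus a banner comment, and rewrites the triple-angle-bracket launch as hipLaunchKernelGGL, which is the pattern visible throughout the rows below:

// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void scale(float *a, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) a[i] *= s;
}

int main(void) {
    const int n = 1024;
    float *d_a;
    hipMalloc(&d_a, n * sizeof(float));
    hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, 0, d_a, 2.0f, n);
    hipDeviceSynchronize();
    hipFree(d_a);
    return 0;
}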
a8b1f9bb8fa375f1a97a6a00ab12ed1b29cddcf4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* File: mat_add.cu * Purpose: Implement matrix addition on a gpu using cuda * * Output: Result of matrix addition. * * Notes: * 1. There are m blocks with n threads each. */ #include <stdio.h> #include <stdlib.h> #include <math.h> //#include "cuPrintf_hip.cuh" //#include "cuPrintf.hip" //#include "utils/cuPrintf.cu" /*--------------------------------------------------------------------- * Kernel: Mat_add * Purpose: Implement matrix addition * In args: A, B, m, n * Out arg: C */ __global__ void rotMatFunc(float matIn[], float matOut[], int dimX, int dimY, float rotMat[]) { // int y = blockIdx.y; /// int x = blockIdx.x * blockDim.x + threadIdx.x; int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= dimX || y >= dimY) return; //printf("x,y = %d %d, blockIdx.x,y= %d %d, blockDim.x,y = %d %d, threadIdx.x,y= %d %d\n", // x,y, blockIdx.x,blockIdx.y, blockDim.x,blockDim.y, threadIdx.x,threadIdx.y); float xOut,yOut; float xIn, yIn; int iIn, jIn; float dimXf=(float)dimX, dimYf=(float)dimY; int x0=dimX/2, y0=dimY/2; // xOut = (float)(x - x0)/dimXf; // yOut = (float)(y - y0)/dimYf; xOut = (float)(x - x0); yOut = (float)(y - y0); xIn = rotMat[0] * xOut + rotMat[1] * yOut; yIn = rotMat[2] * xOut + rotMat[3] * yOut; // iIn = int(xIn * dimXf + x0); // jIn = int(yIn * dimYf + y0); iIn = int(xIn + x0); jIn = int(yIn + y0); if ( iIn >= 0 && iIn < dimX && jIn >= 0 && jIn < dimY) { printf("x=%d y=%d iIn=%d jIn=%d in=%d, out=%d\n",x, y, iIn, jIn, iIn*dimY+jIn,x*dimY+y); matOut[x*dimY+y] = matIn[iIn*dimY+jIn]; } /* int indexOfMatrixOut = y + x * dimY; int x0=dimX/2, y0=dimY/2;//this may be passed float xOut,yOut; float xIn, yIn; int iIn, jIn; float dimXf=(float)dimX, dimYf=(float)dimY; xOut = (float)(x - x0)/dimXf; yOut = (float)(y - y0)/dimYf; //printf("x=%d y=%d x0=%d dimXf=%f xOut=%f yOut=%f\n",x, y, x0, dimXf, xOut, yOut); xIn = rotMat[0] * xOut + rotMat[1] * yOut; yIn = rotMat[2] * xOut + rotMat[3] * yOut; //printf("x =%d y=%d xIn=%f yIn=%f\n",x, y, xIn, yIn); iIn = int(xIn * dimXf + x0); jIn = int(yIn * dimYf + y0); int indexOfMatrixIn = jIn + iIn * dimY; if ( iIn >= 0 && iIn < dimX && jIn >= 0 && jIn < dimY) { matOut[indexOfMatrixOut] = matIn[indexOfMatrixIn]; printf("x=%d y=%d in=%d, out=%d vI=%f vO=%f\n",x, y, indexOfMatrixIn,indexOfMatrixOut, matIn[indexOfMatrixIn], matOut[indexOfMatrixOut]); } */ } /* Mat_add */ /*--------------------------------------------------------------------- * Function: Fill_matrix * Purpose: Fill an m x n matrix with random values * In args: m, n * Out arg: A */ void Fill_matrix(float A[], int dimX, int dimY) { int i, j; //numVec, dimVec for (i = 0; i < dimX; i++) for (j = 0; j < dimY; j++) if(i==j )//or (i+j)==(dimX+1)) A[i*dimY+j]=1.0f; else A[i*dimY+j]=0.0f; } /* Read_matrix */ /*--------------------------------------------------------------------- * Function: Print_matrix * Purpose: Print an m x n matrix to stdout * In args: title, A, m, n */ void Print_matrix(const char title[], float A[], int numVec, int dimVec, int m, int n) { int i, j; //numVec, dimVec printf("%s\n", title); for (i = 0; i < m; i++) { for (j = 0; j < n; j++) printf("%.2f ", A[i*dimVec+j]); printf("\n"); } } /* Print_matrix */ void checkError(hipError_t error, const char function[]) { if(error != hipSuccess) { printf("\"%s\" has a problem with error code %d and desc: %s\n", function, error, hipGetErrorString(error)); exit(-1); } } bool checkIfMatricesEqual(float * 
mat1, float * mat2, float matSize) { int i = 0; for( ; i < matSize; i++) if(mat1[i] != mat2[i]){ printf("values different for i: %d\n", i); printf("mat1[i] = %d, mat2[i] = %d\n", mat1[i], mat2[i]); return false; } return true; } void rotateCPU(float matIn[], float matOut[], int dimX, int dimY, float rotMat[]) { //float fX0,fY0; int x0=dimX/2, y0=dimY/2; //fX0 = (float)iX0; //fY0 = (float)iY0; float xOut,yOut; float xIn, yIn; int iIn, jIn; float dimXf=(float)dimX, dimYf=(float)dimY; for(int x = 0 ; x < dimX; ++x) for(int y = 0 ; y < dimY; ++y){ xOut = (float)(x - x0)/dimXf; yOut = (float)(y - y0)/dimYf; xIn = rotMat[0] * xOut + rotMat[1] * yOut; yIn = rotMat[2] * xOut + rotMat[3] * yOut; iIn = int(xIn * dimXf + x0); jIn = int(yIn * dimYf + y0); if ( iIn >= 0 && iIn < dimX && jIn >= 0 && jIn < dimY) matOut[x*dimY+y] = matIn[iIn*dimY+jIn]; } } /* Host code */ int main(int argc, char* argv[]) { size_t dimX = 900;//mat size size_t dimY = 900; size_t gridX = 9;//mat size size_t gridY = 9; // variables for threads per block, number of blocks. int threadsPerBlock = 32;//, blocksInGrid = 0; //threadsPerBlock = min(_dimY, _dimY); //create cuda event variables hipEvent_t hostStart, hostStop, deviceStart, deviceStop; float timeDifferenceOnHost, timeDifferenceOnDevice; //initialize cuda timing variables hipEventCreate(&hostStart); hipEventCreate(&hostStop); hipEventCreate(&deviceStart); hipEventCreate(&deviceStop); float *h_A, *h_B, *h_B2, *h_rotMat;//PC float *d_A, *d_B, *d_rotMat;//GPU size_t size, matrixSize; /* Get size of matrices */ matrixSize = dimX*dimY; size = matrixSize*sizeof(float); h_A = (float*) calloc(size,1); h_B = (float*) calloc(size,1); h_B2 = (float*) calloc(size,1); h_rotMat = (float*) calloc(4*sizeof(float),1); Fill_matrix(h_A, dimX, dimY); //init rot Matrix h_rotMat[0] = 0.f; h_rotMat[1] = -1.f; //h_rotMat[0] = 0.936f; //h_rotMat[1] = 0.352f; h_rotMat[2] = -h_rotMat[1]; h_rotMat[3] = h_rotMat[0]; Print_matrix("A =", h_A, dimX, dimY, 9, 9); printf("Rotating matrices on CPU...\n"); hipEventRecord(hostStart, 0); //rotate matrix using CPU rotateCPU(h_A ,h_B2, dimX, dimY, h_rotMat); ////////// hipEventRecord(hostStop, 0); hipEventElapsedTime(&timeDifferenceOnHost, hostStart, hostStop); printf("Matrix rotation over. 
Time taken on CPU: %5.5f\n", timeDifferenceOnHost); Print_matrix("B2(CPU) =", h_B2, dimX, dimY, 9, 9); /* Allocate matrices in device memory */ hipMalloc(&d_A, size); hipMalloc(&d_B, size); hipMalloc(&d_rotMat, 4*sizeof(float)); /* Copy matrices from host memory to device memory */ hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); hipMemcpy(d_rotMat, h_rotMat, 4*sizeof(float), hipMemcpyHostToDevice); //create a proper grid block using dim3 /* Invoke kernel using dimX * dimY thread blocks, each of */ /* which contains threadsPerBlock threads */ dim3 block(threadsPerBlock, threadsPerBlock); dim3 grid( (gridX+threadsPerBlock-1)/threadsPerBlock, (gridY+threadsPerBlock-1)/threadsPerBlock ); hipEventRecord(deviceStart, 0); hipLaunchKernelGGL(( rotMatFunc), dim3(grid), dim3(block), 0, 0, d_A, d_B, dimX, dimY, d_rotMat); hipError_t code=hipGetLastError(); if (code) printf("error=%s",hipGetErrorString(code)); else printf("code=%d",code); hipDeviceSynchronize(); hipEventRecord(deviceStop, 0); /* Wait for the kernel to complete */ hipDeviceSynchronize(); hipEventElapsedTime(&timeDifferenceOnDevice, deviceStart, deviceStop); /* Copy result from device memory to host memory */ checkError(hipMemcpy(h_B, d_B, size, hipMemcpyDeviceToHost), "Matrix B Copy from device to Host"); if(checkIfMatricesEqual(h_B, h_B2, matrixSize)) printf("Kernels correct!\n"); else printf("Kernel logic wrong!\n"); printf("Finished addition on GPU. Time taken: %5.5f\n", timeDifferenceOnDevice); printf("Speedup: %5.5f\n", (float)timeDifferenceOnHost/timeDifferenceOnDevice); Print_matrix("The rotated image(CPU) is: ", h_B2, dimX, dimY, 9, 9); Print_matrix("The rotated image(GPU) is: ", h_B, dimX, dimY, 9, 9); /* Free device memory */ hipFree(d_A); hipFree(d_B); /* Free host memory */ free(h_A); free(h_B); free(h_B2); return 0; } /* main */
a8b1f9bb8fa375f1a97a6a00ab12ed1b29cddcf4.cu
/* File: mat_add.cu * Purpose: Implement matrix addition on a gpu using cuda * * Output: Result of matrix addition. * * Notes: * 1. There are m blocks with n threads each. */ #include <stdio.h> #include <stdlib.h> #include <math.h> //#include "cuPrintf.cuh" //#include "cuPrintf.cu" //#include "utils/cuPrintf.cu" /*--------------------------------------------------------------------- * Kernel: Mat_add * Purpose: Implement matrix addition * In args: A, B, m, n * Out arg: C */ __global__ void rotMatFunc(float matIn[], float matOut[], int dimX, int dimY, float rotMat[]) { // int y = blockIdx.y; /// int x = blockIdx.x * blockDim.x + threadIdx.x; int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= dimX || y >= dimY) return; //printf("x,y = %d %d, blockIdx.x,y= %d %d, blockDim.x,y = %d %d, threadIdx.x,y= %d %d\n", // x,y, blockIdx.x,blockIdx.y, blockDim.x,blockDim.y, threadIdx.x,threadIdx.y); float xOut,yOut; float xIn, yIn; int iIn, jIn; float dimXf=(float)dimX, dimYf=(float)dimY; int x0=dimX/2, y0=dimY/2; // xOut = (float)(x - x0)/dimXf; // yOut = (float)(y - y0)/dimYf; xOut = (float)(x - x0); yOut = (float)(y - y0); xIn = rotMat[0] * xOut + rotMat[1] * yOut; yIn = rotMat[2] * xOut + rotMat[3] * yOut; // iIn = int(xIn * dimXf + x0); // jIn = int(yIn * dimYf + y0); iIn = int(xIn + x0); jIn = int(yIn + y0); if ( iIn >= 0 && iIn < dimX && jIn >= 0 && jIn < dimY) { printf("x=%d y=%d iIn=%d jIn=%d in=%d, out=%d\n",x, y, iIn, jIn, iIn*dimY+jIn,x*dimY+y); matOut[x*dimY+y] = matIn[iIn*dimY+jIn]; } /* int indexOfMatrixOut = y + x * dimY; int x0=dimX/2, y0=dimY/2;//this may be passed float xOut,yOut; float xIn, yIn; int iIn, jIn; float dimXf=(float)dimX, dimYf=(float)dimY; xOut = (float)(x - x0)/dimXf; yOut = (float)(y - y0)/dimYf; //printf("x=%d y=%d x0=%d dimXf=%f xOut=%f yOut=%f\n",x, y, x0, dimXf, xOut, yOut); xIn = rotMat[0] * xOut + rotMat[1] * yOut; yIn = rotMat[2] * xOut + rotMat[3] * yOut; //printf("x =%d y=%d xIn=%f yIn=%f\n",x, y, xIn, yIn); iIn = int(xIn * dimXf + x0); jIn = int(yIn * dimYf + y0); int indexOfMatrixIn = jIn + iIn * dimY; if ( iIn >= 0 && iIn < dimX && jIn >= 0 && jIn < dimY) { matOut[indexOfMatrixOut] = matIn[indexOfMatrixIn]; printf("x=%d y=%d in=%d, out=%d vI=%f vO=%f\n",x, y, indexOfMatrixIn,indexOfMatrixOut, matIn[indexOfMatrixIn], matOut[indexOfMatrixOut]); } */ } /* Mat_add */ /*--------------------------------------------------------------------- * Function: Fill_matrix * Purpose: Fill an m x n matrix with random values * In args: m, n * Out arg: A */ void Fill_matrix(float A[], int dimX, int dimY) { int i, j; //numVec, dimVec for (i = 0; i < dimX; i++) for (j = 0; j < dimY; j++) if(i==j )//or (i+j)==(dimX+1)) A[i*dimY+j]=1.0f; else A[i*dimY+j]=0.0f; } /* Read_matrix */ /*--------------------------------------------------------------------- * Function: Print_matrix * Purpose: Print an m x n matrix to stdout * In args: title, A, m, n */ void Print_matrix(const char title[], float A[], int numVec, int dimVec, int m, int n) { int i, j; //numVec, dimVec printf("%s\n", title); for (i = 0; i < m; i++) { for (j = 0; j < n; j++) printf("%.2f ", A[i*dimVec+j]); printf("\n"); } } /* Print_matrix */ void checkError(cudaError_t error, const char function[]) { if(error != cudaSuccess) { printf("\"%s\" has a problem with error code %d and desc: %s\n", function, error, cudaGetErrorString(error)); exit(-1); } } bool checkIfMatricesEqual(float * mat1, float * mat2, float matSize) { int i = 0; for( ; i < matSize; i++) if(mat1[i] != 
mat2[i]){ printf("values different for i: %d\n", i); printf("mat1[i] = %d, mat2[i] = %d\n", mat1[i], mat2[i]); return false; } return true; } void rotateCPU(float matIn[], float matOut[], int dimX, int dimY, float rotMat[]) { //float fX0,fY0; int x0=dimX/2, y0=dimY/2; //fX0 = (float)iX0; //fY0 = (float)iY0; float xOut,yOut; float xIn, yIn; int iIn, jIn; float dimXf=(float)dimX, dimYf=(float)dimY; for(int x = 0 ; x < dimX; ++x) for(int y = 0 ; y < dimY; ++y){ xOut = (float)(x - x0)/dimXf; yOut = (float)(y - y0)/dimYf; xIn = rotMat[0] * xOut + rotMat[1] * yOut; yIn = rotMat[2] * xOut + rotMat[3] * yOut; iIn = int(xIn * dimXf + x0); jIn = int(yIn * dimYf + y0); if ( iIn >= 0 && iIn < dimX && jIn >= 0 && jIn < dimY) matOut[x*dimY+y] = matIn[iIn*dimY+jIn]; } } /* Host code */ int main(int argc, char* argv[]) { size_t dimX = 900;//mat size size_t dimY = 900; size_t gridX = 9;//mat size size_t gridY = 9; // variables for threads per block, number of blocks. int threadsPerBlock = 32;//, blocksInGrid = 0; //threadsPerBlock = min(_dimY, _dimY); //create cuda event variables cudaEvent_t hostStart, hostStop, deviceStart, deviceStop; float timeDifferenceOnHost, timeDifferenceOnDevice; //initialize cuda timing variables cudaEventCreate(&hostStart); cudaEventCreate(&hostStop); cudaEventCreate(&deviceStart); cudaEventCreate(&deviceStop); float *h_A, *h_B, *h_B2, *h_rotMat;//PC float *d_A, *d_B, *d_rotMat;//GPU size_t size, matrixSize; /* Get size of matrices */ matrixSize = dimX*dimY; size = matrixSize*sizeof(float); h_A = (float*) calloc(size,1); h_B = (float*) calloc(size,1); h_B2 = (float*) calloc(size,1); h_rotMat = (float*) calloc(4*sizeof(float),1); Fill_matrix(h_A, dimX, dimY); //init rot Matrix h_rotMat[0] = 0.f; h_rotMat[1] = -1.f; //h_rotMat[0] = 0.936f; //h_rotMat[1] = 0.352f; h_rotMat[2] = -h_rotMat[1]; h_rotMat[3] = h_rotMat[0]; Print_matrix("A =", h_A, dimX, dimY, 9, 9); printf("Rotating matrices on CPU...\n"); cudaEventRecord(hostStart, 0); //rotate matrix using CPU rotateCPU(h_A ,h_B2, dimX, dimY, h_rotMat); ////////// cudaEventRecord(hostStop, 0); cudaEventElapsedTime(&timeDifferenceOnHost, hostStart, hostStop); printf("Matrix rotation over. 
Time taken on CPU: %5.5f\n", timeDifferenceOnHost); Print_matrix("B2(CPU) =", h_B2, dimX, dimY, 9, 9); /* Allocate matrices in device memory */ cudaMalloc(&d_A, size); cudaMalloc(&d_B, size); cudaMalloc(&d_rotMat, 4*sizeof(float)); /* Copy matrices from host memory to device memory */ cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); cudaMemcpy(d_rotMat, h_rotMat, 4*sizeof(float), cudaMemcpyHostToDevice); //create a proper grid block using dim3 /* Invoke kernel using dimX * dimY thread blocks, each of */ /* which contains threadsPerBlock threads */ dim3 block(threadsPerBlock, threadsPerBlock); dim3 grid( (gridX+threadsPerBlock-1)/threadsPerBlock, (gridY+threadsPerBlock-1)/threadsPerBlock ); cudaEventRecord(deviceStart, 0); rotMatFunc<<<grid, block>>>(d_A, d_B, dimX, dimY, d_rotMat); cudaError_t code=cudaGetLastError(); if (code) printf("error=%s",cudaGetErrorString(code)); else printf("code=%d",code); cudaDeviceSynchronize(); cudaEventRecord(deviceStop, 0); /* Wait for the kernel to complete */ cudaThreadSynchronize(); cudaEventElapsedTime(&timeDifferenceOnDevice, deviceStart, deviceStop); /* Copy result from device memory to host memory */ checkError(cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost), "Matrix B Copy from device to Host"); if(checkIfMatricesEqual(h_B, h_B2, matrixSize)) printf("Kernels correct!\n"); else printf("Kernel logic wrong!\n"); printf("Finished addition on GPU. Time taken: %5.5f\n", timeDifferenceOnDevice); printf("Speedup: %5.5f\n", (float)timeDifferenceOnHost/timeDifferenceOnDevice); Print_matrix("The rotated image(CPU) is: ", h_B2, dimX, dimY, 9, 9); Print_matrix("The rotated image(GPU) is: ", h_B, dimX, dimY, 9, 9); /* Free device memory */ cudaFree(d_A); cudaFree(d_B); /* Free host memory */ free(h_A); free(h_B); free(h_B2); return 0; } /* main */
a9e0626cd4a11fb4350269cd4c7ef52ff776b3af.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void mergeGPU1d( unsigned char *image1, unsigned char *image2, unsigned char *res, int pixels ) { int i = threadIdx.x + blockIdx.x*blockDim.x; if( i < pixels ) { int idx = 3*i; int r1 = image1[ idx+2 ]; int g1 = image1[ idx+1 ]; int b1 = image1[ idx ]; int r2 = image2[ idx+2 ]; int g2 = image2[ idx+1 ]; int b2 = image2[ idx ]; int r = (int)( ( (float)r1 + (float)r2 )*0.5f ); int g = (int)( ( (float)g1 + (float)g2 )*0.5f ); int b = (int)( ( (float)b1 + (float)b2 )*0.5f ); res[ idx+2 ] = (unsigned char)r; res[ idx+1 ] = (unsigned char)g; res[ idx ] = (unsigned char)b; } }
a9e0626cd4a11fb4350269cd4c7ef52ff776b3af.cu
#include "includes.h" __global__ void mergeGPU1d( unsigned char *image1, unsigned char *image2, unsigned char *res, int pixels ) { int i = threadIdx.x + blockIdx.x*blockDim.x; if( i < pixels ) { int idx = 3*i; int r1 = image1[ idx+2 ]; int g1 = image1[ idx+1 ]; int b1 = image1[ idx ]; int r2 = image2[ idx+2 ]; int g2 = image2[ idx+1 ]; int b2 = image2[ idx ]; int r = (int)( ( (float)r1 + (float)r2 )*0.5f ); int g = (int)( ( (float)g1 + (float)g2 )*0.5f ); int b = (int)( ( (float)b1 + (float)b2 )*0.5f ); res[ idx+2 ] = (unsigned char)r; res[ idx+1 ] = (unsigned char)g; res[ idx ] = (unsigned char)b; } }
47a27034f2a31d182610db4a0c77871724cc6096.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "rocblas.h" #include <time.h> #include <windows.h> #include <iostream> using namespace std; int gettimeofday(struct timeval *tp, void *tzp) { time_t clock; struct tm tm; SYSTEMTIME wtm; GetLocalTime(&wtm); tm.tm_year = wtm.wYear - 1900; tm.tm_mon = wtm.wMonth - 1; tm.tm_mday = wtm.wDay; tm.tm_hour = wtm.wHour; tm.tm_min = wtm.wMinute; tm.tm_sec = wtm.wSecond; tm.tm_isdst = -1; clock = mktime(&tm); tp->tv_sec = clock; tp->tv_usec = wtm.wMilliseconds * 1000; return (0); } // int const M = 8192; int const N = M; extern void printMatrix(float* m_Matrix, int W, int H); int main() { // hipblasStatus_t status; // float *h_A = (float*)malloc(N*M * sizeof(float)); float *h_B = (float*)malloc(N*M * sizeof(float)); // float *h_C = (float*)malloc(M*M * sizeof(float)); // 0-10 for (int i = 0; i < N*M; i++) { h_A[i] = i%3; h_B[i] = i%2; } printMatrix(h_A,2,2); cout << endl; printMatrix(h_B,2,2); hipSetDevice(0); /* ** GPU */ // CUBLAS hipblasHandle_t handle; status = hipblasCreate(&handle); if (status != HIPBLAS_STATUS_SUCCESS) { if (status == HIPBLAS_STATUS_NOT_INITIALIZED) { cout << "CUBLAS " << endl; } getchar(); return EXIT_FAILURE; } struct timeval t1, t2; gettimeofday(&t1, NULL); double timeuse; time_t z1 = time(NULL); printf("t1 %d\n", z1); hipEvent_t start_device, stop_device; float time_device; hipEventCreate(&start_device); hipEventCreate(&stop_device); hipEventRecord(start_device, 0); clock_t start_devence = clock(); float *d_A, *d_B, *d_C, *d_D; hipMalloc((void**)&d_A, N*M * sizeof(float)); hipMalloc((void**)&d_B, N*M * sizeof(float)); hipMalloc((void**)&d_C, N*M * sizeof(float)); hipMalloc((void**)&d_D, N*M * sizeof(float)); hipblasSetVector(N*M,sizeof(float),h_A,1,d_A,1); hipblasSetVector(N*M,sizeof(float),h_B,1,d_B,1); // hipDeviceSynchronize(); // float a = 1; float b = 0; // hipblasSgemm( handle, // blas HIPBLAS_OP_T, // A HIPBLAS_OP_T, // B M, // A, C M, // B, C N, // A B &a, // d_A, // A N, // lda d_B, // B M, // ldb &b, // d_C, // C () M // ldc ); // hipDeviceSynchronize(); // hipblasGetVector( M*M, // sizeof(float), // d_C, // GPU 1, // h_C, // 1 // ); for (int i = 0; i < 4; i++) { h_C[i] = h_C[i*M]; } printMatrix(h_C,2,2); hipEventRecord(stop_device, 0); hipEventSynchronize(stop_device); hipEventElapsedTime(&time_device, start_device, stop_device); hipEventDestroy(start_device); hipEventDestroy(stop_device); cout << "gputime=" << time_device << "ms" << endl; cout << "devence=" << (double)(clock() - start_devence) / 1 << "ms" << endl; gettimeofday(&t2, NULL); timeuse = t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec) / 1000000.0; printf("Use Time:%f\n", timeuse); // free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); // CUBLAS hipblasDestroy(handle); time_t z2 = time(NULL); printf("t2 %d\n", z2); printf("time %d\n", z2-z1); getchar(); return 0; }
47a27034f2a31d182610db4a0c77871724cc6096.cu
#include "cuda_runtime.h" #include "cublas_v2.h" #include <time.h> #include <windows.h> #include <iostream> using namespace std; int gettimeofday(struct timeval *tp, void *tzp) { time_t clock; struct tm tm; SYSTEMTIME wtm; GetLocalTime(&wtm); tm.tm_year = wtm.wYear - 1900; tm.tm_mon = wtm.wMonth - 1; tm.tm_mday = wtm.wDay; tm.tm_hour = wtm.wHour; tm.tm_min = wtm.wMinute; tm.tm_sec = wtm.wSecond; tm.tm_isdst = -1; clock = mktime(&tm); tp->tv_sec = clock; tp->tv_usec = wtm.wMilliseconds * 1000; return (0); } // 定义测试矩阵的维度 int const M = 8192; int const N = M; extern void printMatrix(float* m_Matrix, int W, int H); int main() { // 定义状态变量 cublasStatus_t status; // 在 内存 中为将要计算的矩阵开辟空间 float *h_A = (float*)malloc(N*M * sizeof(float)); float *h_B = (float*)malloc(N*M * sizeof(float)); // 在 内存 中为将要存放运算结果的矩阵开辟空间 float *h_C = (float*)malloc(M*M * sizeof(float)); // 为待运算矩阵的元素赋予 0-10 范围内的随机数 for (int i = 0; i < N*M; i++) { h_A[i] = i%3; h_B[i] = i%2; } printMatrix(h_A,2,2); cout << endl; printMatrix(h_B,2,2); cudaSetDevice(0); /* ** GPU 计算矩阵相乘 */ // 创建并初始化 CUBLAS 库对象 cublasHandle_t handle; status = cublasCreate(&handle); if (status != CUBLAS_STATUS_SUCCESS) { if (status == CUBLAS_STATUS_NOT_INITIALIZED) { cout << "CUBLAS 对象实例化出错" << endl; } getchar(); return EXIT_FAILURE; } struct timeval t1, t2; gettimeofday(&t1, NULL); double timeuse; time_t z1 = time(NULL); printf("t1 %d\n", z1); cudaEvent_t start_device, stop_device; float time_device; cudaEventCreate(&start_device); cudaEventCreate(&stop_device); cudaEventRecord(start_device, 0); clock_t start_devence = clock(); float *d_A, *d_B, *d_C, *d_D; cudaMalloc((void**)&d_A, N*M * sizeof(float)); cudaMalloc((void**)&d_B, N*M * sizeof(float)); cudaMalloc((void**)&d_C, N*M * sizeof(float)); cudaMalloc((void**)&d_D, N*M * sizeof(float)); cublasSetVector(N*M,sizeof(float),h_A,1,d_A,1); cublasSetVector(N*M,sizeof(float),h_B,1,d_B,1); // 同步函数 cudaThreadSynchronize(); // 传递进矩阵相乘函数中的参数,具体含义请参考函数手册。 float a = 1; float b = 0; // 矩阵相乘。该函数必然将数组解析成列优先数组 cublasSgemm( handle, // blas 库对象 CUBLAS_OP_T, // 矩阵 A 属性参数 CUBLAS_OP_T, // 矩阵 B 属性参数 M, // A, C 的行数 M, // B, C 的列数 N, // A 的列数和 B 的行数 &a, // 运算式的 α 值 d_A, // A 在显存中的地址 N, // lda d_B, // B 在显存中的地址 M, // ldb &b, // 运算式的 β 值 d_C, // C 在显存中的地址(结果矩阵) M // ldc ); // 同步函数 cudaThreadSynchronize(); // 从 显存 中取出运算结果至 内存中去 cublasGetVector( M*M, // 要取出元素的个数 sizeof(float), // 每个元素大小 d_C, // GPU 端起始地址 1, // 连续元素之间的存储间隔 h_C, // 主机端起始地址 1 // 连续元素之间的存储间隔 ); for (int i = 0; i < 4; i++) { h_C[i] = h_C[i*M]; } printMatrix(h_C,2,2); cudaEventRecord(stop_device, 0); cudaEventSynchronize(stop_device); cudaEventElapsedTime(&time_device, start_device, stop_device); cudaEventDestroy(start_device); cudaEventDestroy(stop_device); cout << "gputime=" << time_device << "ms" << endl; cout << "devence=" << (double)(clock() - start_devence) / 1 << "ms" << endl; gettimeofday(&t2, NULL); timeuse = t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec) / 1000000.0; printf("Use Time:%f\n", timeuse); // 清理掉使用过的内存 free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // 释放 CUBLAS 库对象 cublasDestroy(handle); time_t z2 = time(NULL); printf("t2 %d\n", z2); printf("time %d\n", z2-z1); getchar(); return 0; }
592eb797681d1b9870b417dbf2608f54fcf73327.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* STREAM benchmark implementation in CUDA. COPY: a(i) = b(i) SCALE: a(i) = q*b(i) SUM: a(i) = b(i) + c(i) TRIAD: a(i) = b(i) + q*c(i) It measures the memory system on the device. The implementation is in single precision. Code based on the code developed by John D. McCalpin http://www.cs.virginia.edu/stream/FTP/Code/stream.c Written by: Massimiliano Fatica, NVIDIA Corporation */ //#define N 200000000 #define NTIMES 1000 #include <stdio.h> #include <float.h> #include <limits.h> #include <sys/time.h> #include <fstream> #include <iostream> #include <stdlib.h> #include <string.h> # ifndef MIN # define MIN(x,y) ((x)<(y)?(x):(y)) # endif # ifndef MAX # define MAX(x,y) ((x)>(y)?(x):(y)) # endif static double avgtime[4] = {0};//, maxtime[4] = {0}, //mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; const char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; /* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. */ double mysecond() { struct timeval tp; struct timezone tzp; int i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } __global__ void set_array(float *a, float value, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) a[idx] = value; } __global__ void STREAM_Copy(float *a, float *b, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) b[idx] = a[idx]; } __global__ void STREAM_Scale(float *a, float *b, float scale, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) b[idx] = scale* a[idx]; } __global__ void STREAM_Add( float *a, float *b, float *c, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) c[idx] = a[idx]+b[idx]; } __global__ void STREAM_Triad( float *a, float *b, float *c, float scalar, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) c[idx] = a[idx]+scalar*b[idx]; } int main(int argc, const char* argv[]) { float *d_a, *d_b, *d_c; int j,k; double times[4][NTIMES]; float scalar; int N = 1000; int bsize = 128; for (j=1;j<argc;j++){ if (strcmp(argv[j],"-N") == 0) { j++; N = atoi(argv[j]); } if (strcmp(argv[j],"-B") == 0) { j++; bsize = atoi(argv[j]); } } printf(" STREAM Benchmark implementation in CUDA\n"); printf(" Array size (single precision)=%d\n",N); /* Allocate memory on device */ hipMalloc((void**)&d_a, sizeof(float)*N); hipMalloc((void**)&d_b, sizeof(float)*N); hipMalloc((void**)&d_c, sizeof(float)*N); /* Compute execution configuration */ dim3 dimBlock(bsize); dim3 dimGrid(N/dimBlock.x ); if( N % dimBlock.x != 0 ) dimGrid.x+=1; printf(" using %d threads per block, %d blocks\n",dimBlock.x,dimGrid.x); /* Initialize memory on the device */ hipLaunchKernelGGL(( set_array), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a, 2.f, N); hipLaunchKernelGGL(( set_array), dim3(dimGrid),dim3(dimBlock), 0, 0, d_b, .5f, N); hipLaunchKernelGGL(( set_array), dim3(dimGrid),dim3(dimBlock), 0, 0, d_c, .5f, N); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar=3.0f; for (k=0; k<NTIMES; k++) { times[0][k]= mysecond(); hipLaunchKernelGGL(( STREAM_Copy), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a, d_c, N); hipDeviceSynchronize(); times[0][k]= mysecond() - times[0][k]; times[1][k]= mysecond(); hipLaunchKernelGGL(( STREAM_Scale), dim3(dimGrid),dim3(dimBlock), 0, 0, d_b, d_c, scalar, N); hipDeviceSynchronize(); times[1][k]= mysecond() - times[1][k]; times[2][k]= mysecond(); hipLaunchKernelGGL(( STREAM_Add), 
dim3(dimGrid),dim3(dimBlock), 0, 0, d_a, d_b, d_c, N); hipDeviceSynchronize(); times[2][k]= mysecond() - times[2][k]; times[3][k]= mysecond(); hipLaunchKernelGGL(( STREAM_Triad), dim3(dimGrid),dim3(dimBlock), 0, 0, d_b, d_c, d_a, scalar, N); hipDeviceSynchronize(); times[3][k]= mysecond() - times[3][k]; } static double bytes[4] = { 2 * sizeof(float) * N, 2 * sizeof(float) * N, 3 * sizeof(float) * N, 3 * sizeof(float) * N }; /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; //mintime[j] = MIN(mintime[j], times[j][k]); //maxtime[j] = MAX(maxtime[j], times[j][k]); } } printf("Function Rate (MB/s) Avg time\n"); for (j=0; j<4; j++) { avgtime[j] = avgtime[j]/(double)(NTIMES-1); printf("%s%11.8f %11.8f\n", label[j], 1.0E-06 * bytes[j]/avgtime[j], avgtime[j]); //mintime[j], //maxtime[j]); } /* Free memory on device */ hipFree(d_a); hipFree(d_b); hipFree(d_c); }
592eb797681d1b9870b417dbf2608f54fcf73327.cu
/* STREAM benchmark implementation in CUDA. COPY: a(i) = b(i) SCALE: a(i) = q*b(i) SUM: a(i) = b(i) + c(i) TRIAD: a(i) = b(i) + q*c(i) It measures the memory system on the device. The implementation is in single precision. Code based on the code developed by John D. McCalpin http://www.cs.virginia.edu/stream/FTP/Code/stream.c Written by: Massimiliano Fatica, NVIDIA Corporation */ //#define N 200000000 #define NTIMES 1000 #include <stdio.h> #include <float.h> #include <limits.h> #include <sys/time.h> #include <fstream> #include <iostream> #include <stdlib.h> #include <string.h> # ifndef MIN # define MIN(x,y) ((x)<(y)?(x):(y)) # endif # ifndef MAX # define MAX(x,y) ((x)>(y)?(x):(y)) # endif static double avgtime[4] = {0};//, maxtime[4] = {0}, //mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; const char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; /* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. */ double mysecond() { struct timeval tp; struct timezone tzp; int i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } __global__ void set_array(float *a, float value, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) a[idx] = value; } __global__ void STREAM_Copy(float *a, float *b, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) b[idx] = a[idx]; } __global__ void STREAM_Scale(float *a, float *b, float scale, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) b[idx] = scale* a[idx]; } __global__ void STREAM_Add( float *a, float *b, float *c, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) c[idx] = a[idx]+b[idx]; } __global__ void STREAM_Triad( float *a, float *b, float *c, float scalar, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) c[idx] = a[idx]+scalar*b[idx]; } int main(int argc, const char* argv[]) { float *d_a, *d_b, *d_c; int j,k; double times[4][NTIMES]; float scalar; int N = 1000; int bsize = 128; for (j=1;j<argc;j++){ if (strcmp(argv[j],"-N") == 0) { j++; N = atoi(argv[j]); } if (strcmp(argv[j],"-B") == 0) { j++; bsize = atoi(argv[j]); } } printf(" STREAM Benchmark implementation in CUDA\n"); printf(" Array size (single precision)=%d\n",N); /* Allocate memory on device */ cudaMalloc((void**)&d_a, sizeof(float)*N); cudaMalloc((void**)&d_b, sizeof(float)*N); cudaMalloc((void**)&d_c, sizeof(float)*N); /* Compute execution configuration */ dim3 dimBlock(bsize); dim3 dimGrid(N/dimBlock.x ); if( N % dimBlock.x != 0 ) dimGrid.x+=1; printf(" using %d threads per block, %d blocks\n",dimBlock.x,dimGrid.x); /* Initialize memory on the device */ set_array<<<dimGrid,dimBlock>>>(d_a, 2.f, N); set_array<<<dimGrid,dimBlock>>>(d_b, .5f, N); set_array<<<dimGrid,dimBlock>>>(d_c, .5f, N); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar=3.0f; for (k=0; k<NTIMES; k++) { times[0][k]= mysecond(); STREAM_Copy<<<dimGrid,dimBlock>>>(d_a, d_c, N); cudaThreadSynchronize(); times[0][k]= mysecond() - times[0][k]; times[1][k]= mysecond(); STREAM_Scale<<<dimGrid,dimBlock>>>(d_b, d_c, scalar, N); cudaThreadSynchronize(); times[1][k]= mysecond() - times[1][k]; times[2][k]= mysecond(); STREAM_Add<<<dimGrid,dimBlock>>>(d_a, d_b, d_c, N); cudaThreadSynchronize(); times[2][k]= mysecond() - times[2][k]; times[3][k]= mysecond(); STREAM_Triad<<<dimGrid,dimBlock>>>(d_b, d_c, d_a, scalar, N); cudaThreadSynchronize(); times[3][k]= mysecond() - times[3][k]; } static double bytes[4] = { 2 * 
sizeof(float) * N, 2 * sizeof(float) * N, 3 * sizeof(float) * N, 3 * sizeof(float) * N }; /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; //mintime[j] = MIN(mintime[j], times[j][k]); //maxtime[j] = MAX(maxtime[j], times[j][k]); } } printf("Function Rate (MB/s) Avg time\n"); for (j=0; j<4; j++) { avgtime[j] = avgtime[j]/(double)(NTIMES-1); printf("%s%11.8f %11.8f\n", label[j], 1.0E-06 * bytes[j]/avgtime[j], avgtime[j]); //mintime[j], //maxtime[j]); } /* Free memory on device */ cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); }
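The STREAM summary loop in this pair averages the per-iteration times over NTIMES-1 runs (the first iteration is skipped) and reports 1.0E-06 * bytes / avgtime as the rate, where Copy and Scale move 2*N floats and Add and Triad move 3*N floats. A small worked sketch of that arithmetic follows; the N and timing values are made up purely for illustration.

#include <stdio.h>

// Worked example of the STREAM rate formula used in the row above; numbers are illustrative only.
int main(void) {
    const int N = 1000000;                        // assumed array length
    const double avgtime = 0.00125;               // assumed average Triad time in seconds
    const double bytes = 3.0 * sizeof(float) * N; // Triad reads b and c and writes a
    printf("Triad: %11.8f MB/s\n", 1.0e-6 * bytes / avgtime);
    return 0;
}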
7d5b3c985ff729940bb9d4b53b99b5a2bef9f3cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Matrix multiplication: P = M * N. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <stdio.h> #include "matrixmul.h" // Matrix multiplication kernel thread specification __global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P) { //Multiply the two matrices // Calcullate row index of matrix of M and P unsigned int Row = threadIdx.y + (blockIdx.y * blockDim.y); // Calcullate column index of matrix N and P unsigned int Col = threadIdx.x + (blockIdx.x * blockDim.x); if((Row < MATRIX_SIZE) && (Col < MATRIX_SIZE)) // Since Width=MATRIX_SIZE { float Pvalue = 0; // Each thread computes one element of block sub-matrix for(unsigned int k=0; k < MATRIX_SIZE; ++k) // Again Width=MATRIX_SIZE here { Pvalue += M.elements[Row * MATRIX_SIZE + k] * N.elements[k * MATRIX_SIZE + Col]; // Both M and N elements are stored in Row-major layout } P.elements[Row*MATRIX_SIZE + Col] = Pvalue; } } #endif // #ifndef _MATRIXMUL_KERNEL_H_
7d5b3c985ff729940bb9d4b53b99b5a2bef9f3cb.cu
/* Matrix multiplication: P = M * N. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <stdio.h> #include "matrixmul.h" // Matrix multiplication kernel thread specification __global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P) { //Multiply the two matrices // Calcullate row index of matrix of M and P unsigned int Row = threadIdx.y + (blockIdx.y * blockDim.y); // Calcullate column index of matrix N and P unsigned int Col = threadIdx.x + (blockIdx.x * blockDim.x); if((Row < MATRIX_SIZE) && (Col < MATRIX_SIZE)) // Since Width=MATRIX_SIZE { float Pvalue = 0; // Each thread computes one element of block sub-matrix for(unsigned int k=0; k < MATRIX_SIZE; ++k) // Again Width=MATRIX_SIZE here { Pvalue += M.elements[Row * MATRIX_SIZE + k] * N.elements[k * MATRIX_SIZE + Col]; // Both M and N elements are stored in Row-major layout } P.elements[Row*MATRIX_SIZE + Col] = Pvalue; } } #endif // #ifndef _MATRIXMUL_KERNEL_H_
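MatrixMulKernel computes one output element per thread and relies on MATRIX_SIZE and a Matrix type from matrixmul.h, which is not part of this row. Below is a minimal host-side sketch that is consistent with how the kernel indexes M.elements; the Matrix layout, the MATRIX_SIZE value, and the 16x16 block size are assumptions, not values taken from the header.

#include <cuda_runtime.h>

#define MATRIX_SIZE 1024                 // assumed; the real value lives in matrixmul.h

struct Matrix { float *elements; };      // assumed layout: row-major MATRIX_SIZE x MATRIX_SIZE

__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P);  // defined in the .cu above

int main(void) {
    const size_t bytes = (size_t)MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
    Matrix M, N, P;
    cudaMalloc(&M.elements, bytes);
    cudaMalloc(&N.elements, bytes);
    cudaMalloc(&P.elements, bytes);
    // 16x16 threads per block; round the grid up so the kernel's bounds check covers the edges.
    dim3 block(16, 16);
    dim3 grid((MATRIX_SIZE + block.x - 1) / block.x,
              (MATRIX_SIZE + block.y - 1) / block.y);
    MatrixMulKernel<<<grid, block>>>(M, N, P);
    cudaDeviceSynchronize();
    cudaFree(M.elements); cudaFree(N.elements); cudaFree(P.elements);
    return 0;
}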
8c59ce5a917bcfab5c87f3341550937e3bfaaa66.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <float.h> #include "hip/hip_complex.h" #include "time.h" #include "hip/hip_runtime.h" #include <iostream> using namespace std; struct test { float vu[2]; float w; float visibility_x; float visibility_y; float empt[3]; } ; __global__ void test(struct test * vis, int vis_entries, int vis_per_block, hipComplex *convFunc, int support, int sampling, int wplanes, float wpow2increment, hipComplex *output, int dim_row, int dim_col, float inc_row, float inc_col) { __shared__ struct test2 { struct test testing; } tt; support = support / 2; // Let's first see which visibility data shall be taken care of __shared__ float pos_y,pos_x; __shared__ int loc_y, loc_x; __shared__ int off_y, off_x; __shared__ int wplane; float tmp; hipComplex add; int begin = (blockIdx.y*vis_per_block); int end = begin + vis_per_block - 5; if (begin>=vis_entries) return; //This block is useless if (end>=vis_entries) end = vis_entries - 1; for (int vis_no = begin; vis_no <= end; vis_no++) { __syncthreads(); if ((threadIdx.x == 0) && (threadIdx.y == 0)) { tt= *(struct test2*) (vis + vis_no); // We first find in which wplane point is wplane = (int) round(sqrtf(fabs(wpow2increment * tt.testing.w))); //still need to understand the +1 abd offset.. I beleive it is 0 if ( wplane > (wplanes -1) ) wplane = wplanes - 1; pos_y=(tt.testing.vu[0] / inc_row) + __int2float_rz(dim_row / 2); pos_x=(tt.testing.vu[1] / inc_col) + __int2float_rz(dim_col / 2); loc_y = __float2int_rz(pos_y); loc_x = __float2int_rz(pos_x); off_y = __float2int_rz(__int2float_rz(loc_y-pos_y)* __int2float_rz(sampling)); off_x = __float2int_rz(__int2float_rz(loc_x-pos_x)* __int2float_rz(sampling)); } __syncthreads(); if (((loc_y-support)<0) || ((loc_y+support)>(dim_row-1)) || ((loc_x-support)<0) || ((loc_x+support)>(dim_col-1))) continue; /// out of grid //continue; for (int iy = (-support + threadIdx.y); //*17 iy <= support; iy += blockDim.y) { // int ix_orig=(-support+threadIdx.x); // int ix=ix_orig; // ix -= (loc_x + ix - threadIdx.x) % 64; //Important for (int ix = (-support + threadIdx.x); //*17 ix <= support; ix += blockDim.x) { // if (ix < ix_orig) continue; int newloc_y=loc_y+iy; int newloc_x=loc_x+ix; //newloc[0]=loc[0]+iy; //newloc[1]=loc[1]+ix; //loc_y+=iy; //loc_x+=ix; int poss=(newloc_y)*dim_col+newloc_x; // poss=0; //int iloc_y=(sampling*(iy+support))+(sampling-1)+off[0]; // int iloc_x=(sampling*(ix+support/)+(sampling-1)+off[1]; int iloc_y=(off_y+(support))*sampling+(iy+support); int iloc_x=(off_x+(support))*sampling+(ix+support); //REVIEW above.... worthed to do an interesting note //if ((iloc_x<0)||(iloc_y<0)) continue; int convplaneside=(support*2+1)*sampling-1; //if ((iloc_x>convplaneside)||(iloc_y>convplaneside)) continue; hipComplex addvalue; int pss2=wplane*convplaneside*convplaneside+(iloc_y*convplaneside+iloc_x); // if (threadIdx.x>100000) printf("%d",pss2); //pss2-=(pss2-threadIdx.x)%32; // no real big difference //hipComplex wt=*(convFunc+ pss2); //To change for convFunc //To CHECK... 
Do we expect complex values hipComplex wt; wt.x=1.0f; wt.y=1.0f; addvalue.x = tt.testing.visibility_x * wt.x-tt.testing.visibility_y* wt.y; addvalue.y = tt.testing.visibility_y * wt.x+tt.testing.visibility_x* wt.y; //can be improved add.x+=addvalue.x; add.y+=addvalue.y; (output+poss)->x=addvalue.x; (output+poss)->y=addvalue.y; //// output[threadIdx.y * blockDim.x + threadIdx.x] = addvalue; // tmp+=addvalue.x+addvalue.y; // atomicAdd(&((output+poss)->x),addvalue.x); // atomicAdd(&((output+poss)->y),addvalue.y); // // } } //atomicAdd(&(output)->x,tmp); } // __syncthreads(); if(threadIdx.x == 0 && threadIdx.y == 0) output[blockIdx.y].x=tmp; // atomicAdd(&((output)->x),tmp); // atomicAdd(&((output)->y),add.y); } int main(int argc, char *argv[]) { float timestamp; float *data; hipComplex* grid; hipEvent_t event_start,event_stop; // Initialise hipSetDevice(0); hipDeviceSetCacheConfig(hipFuncCachePreferShared); // Allocate and generate buffers hipMalloc((void **) &data, 240000 * 8 * sizeof(float)); hipMalloc((void **) &grid, 2016* 2016 * sizeof(hipComplex)); hipMemset(data, 0, 240000 * 8 * sizeof(float)); hipEventCreate(&event_start); hipEventCreate(&event_stop); cout <<"Allocation ready"<<endl; hipEventRecord(event_start, 0); dim3 threadsPerBlock; dim3 blocks; int b=1000; threadsPerBlock.x=32; threadsPerBlock.y=32; threadsPerBlock.z=1; blocks.x=1; blocks.y=24000; blocks.z=1; hipLaunchKernelGGL(( test), dim3(blocks),dim3(threadsPerBlock), 0, 0, (struct test*)data, 240000, 240000 / blocks.y, // b_x, b_y, NULL, 529, 4, 128, 1.2, grid, 2016,2016, 6.56984f,6.56984f ); hipEventRecord(event_stop, 0); cout << "Now waiting"<<endl; cout.flush(); hipEventSynchronize(event_stop); hipEventElapsedTime(&timestamp, event_start, event_stop); printf("Calculated in %f\n", timestamp); }
8c59ce5a917bcfab5c87f3341550937e3bfaaa66.cu
#include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <float.h> #include "cuComplex.h" #include "time.h" #include "cuda_runtime.h" #include <iostream> using namespace std; struct test { float vu[2]; float w; float visibility_x; float visibility_y; float empt[3]; } ; __global__ void test(struct test * vis, int vis_entries, int vis_per_block, cuComplex *convFunc, int support, int sampling, int wplanes, float wpow2increment, cuComplex *output, int dim_row, int dim_col, float inc_row, float inc_col) { __shared__ struct test2 { struct test testing; } tt; support = support / 2; // Let's first see which visibility data shall be taken care of __shared__ float pos_y,pos_x; __shared__ int loc_y, loc_x; __shared__ int off_y, off_x; __shared__ int wplane; float tmp; cuComplex add; int begin = (blockIdx.y*vis_per_block); int end = begin + vis_per_block - 5; if (begin>=vis_entries) return; //This block is useless if (end>=vis_entries) end = vis_entries - 1; for (int vis_no = begin; vis_no <= end; vis_no++) { __syncthreads(); if ((threadIdx.x == 0) && (threadIdx.y == 0)) { tt= *(struct test2*) (vis + vis_no); // We first find in which wplane point is wplane = (int) round(sqrtf(fabs(wpow2increment * tt.testing.w))); //still need to understand the +1 abd offset.. I beleive it is 0 if ( wplane > (wplanes -1) ) wplane = wplanes - 1; pos_y=(tt.testing.vu[0] / inc_row) + __int2float_rz(dim_row / 2); pos_x=(tt.testing.vu[1] / inc_col) + __int2float_rz(dim_col / 2); loc_y = __float2int_rz(pos_y); loc_x = __float2int_rz(pos_x); off_y = __float2int_rz(__int2float_rz(loc_y-pos_y)* __int2float_rz(sampling)); off_x = __float2int_rz(__int2float_rz(loc_x-pos_x)* __int2float_rz(sampling)); } __syncthreads(); if (((loc_y-support)<0) || ((loc_y+support)>(dim_row-1)) || ((loc_x-support)<0) || ((loc_x+support)>(dim_col-1))) continue; /// out of grid //continue; for (int iy = (-support + threadIdx.y); //*17 iy <= support; iy += blockDim.y) { // int ix_orig=(-support+threadIdx.x); // int ix=ix_orig; // ix -= (loc_x + ix - threadIdx.x) % 64; //Important for (int ix = (-support + threadIdx.x); //*17 ix <= support; ix += blockDim.x) { // if (ix < ix_orig) continue; int newloc_y=loc_y+iy; int newloc_x=loc_x+ix; //newloc[0]=loc[0]+iy; //newloc[1]=loc[1]+ix; //loc_y+=iy; //loc_x+=ix; int poss=(newloc_y)*dim_col+newloc_x; // poss=0; //int iloc_y=(sampling*(iy+support))+(sampling-1)+off[0]; // int iloc_x=(sampling*(ix+support/)+(sampling-1)+off[1]; int iloc_y=(off_y+(support))*sampling+(iy+support); int iloc_x=(off_x+(support))*sampling+(ix+support); //REVIEW above.... worthed to do an interesting note //if ((iloc_x<0)||(iloc_y<0)) continue; int convplaneside=(support*2+1)*sampling-1; //if ((iloc_x>convplaneside)||(iloc_y>convplaneside)) continue; cuComplex addvalue; int pss2=wplane*convplaneside*convplaneside+(iloc_y*convplaneside+iloc_x); // if (threadIdx.x>100000) printf("%d",pss2); //pss2-=(pss2-threadIdx.x)%32; // no real big difference //cuComplex wt=*(convFunc+ pss2); //To change for convFunc //To CHECK... 
Do we expect complex values cuComplex wt; wt.x=1.0f; wt.y=1.0f; addvalue.x = tt.testing.visibility_x * wt.x-tt.testing.visibility_y* wt.y; addvalue.y = tt.testing.visibility_y * wt.x+tt.testing.visibility_x* wt.y; //can be improved add.x+=addvalue.x; add.y+=addvalue.y; (output+poss)->x=addvalue.x; (output+poss)->y=addvalue.y; //// output[threadIdx.y * blockDim.x + threadIdx.x] = addvalue; // tmp+=addvalue.x+addvalue.y; // atomicAdd(&((output+poss)->x),addvalue.x); // atomicAdd(&((output+poss)->y),addvalue.y); // // } } //atomicAdd(&(output)->x,tmp); } // __syncthreads(); if(threadIdx.x == 0 && threadIdx.y == 0) output[blockIdx.y].x=tmp; // atomicAdd(&((output)->x),tmp); // atomicAdd(&((output)->y),add.y); } int main(int argc, char *argv[]) { float timestamp; float *data; cuComplex* grid; cudaEvent_t event_start,event_stop; // Initialise cudaSetDevice(0); cudaThreadSetCacheConfig(cudaFuncCachePreferShared); // Allocate and generate buffers cudaMalloc((void **) &data, 240000 * 8 * sizeof(float)); cudaMalloc((void **) &grid, 2016* 2016 * sizeof(cuComplex)); cudaMemset(data, 0, 240000 * 8 * sizeof(float)); cudaEventCreate(&event_start); cudaEventCreate(&event_stop); cout <<"Allocation ready"<<endl; cudaEventRecord(event_start, 0); dim3 threadsPerBlock; dim3 blocks; int b=1000; threadsPerBlock.x=32; threadsPerBlock.y=32; threadsPerBlock.z=1; blocks.x=1; blocks.y=24000; blocks.z=1; test<<<blocks,threadsPerBlock>>> ((struct test*)data, 240000, 240000 / blocks.y, // b_x, b_y, NULL, 529, 4, 128, 1.2, grid, 2016,2016, 6.56984f,6.56984f ); cudaEventRecord(event_stop, 0); cout << "Now waiting"<<endl; cout.flush(); cudaEventSynchronize(event_stop); cudaEventElapsedTime(&timestamp, event_start, event_stop); printf("Calculated in %f\n", timestamp); }
d1dedf57e0c99c1cd3e505e92f7c091192b1a751.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "canberra_kernel_same.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *vg_a = NULL; hipMalloc(&vg_a, XSIZE*YSIZE); size_t pitch_a = 2; size_t n_a = XSIZE; const float *vg_b = NULL; hipMalloc(&vg_b, XSIZE*YSIZE); size_t pitch_b = 2; size_t n_b = YSIZE; size_t k = 1; float *d = NULL; hipMalloc(&d, XSIZE*YSIZE); size_t pitch_d = 2; float p = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( canberra_kernel_same), dim3(gridBlock),dim3(threadBlock), 0, 0, vg_a,pitch_a,n_a,vg_b,pitch_b,n_b,k,d,pitch_d,p); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( canberra_kernel_same), dim3(gridBlock),dim3(threadBlock), 0, 0, vg_a,pitch_a,n_a,vg_b,pitch_b,n_b,k,d,pitch_d,p); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( canberra_kernel_same), dim3(gridBlock),dim3(threadBlock), 0, 0, vg_a,pitch_a,n_a,vg_b,pitch_b,n_b,k,d,pitch_d,p); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d1dedf57e0c99c1cd3e505e92f7c091192b1a751.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "canberra_kernel_same.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *vg_a = NULL; cudaMalloc(&vg_a, XSIZE*YSIZE); size_t pitch_a = 2; size_t n_a = XSIZE; const float *vg_b = NULL; cudaMalloc(&vg_b, XSIZE*YSIZE); size_t pitch_b = 2; size_t n_b = YSIZE; size_t k = 1; float *d = NULL; cudaMalloc(&d, XSIZE*YSIZE); size_t pitch_d = 2; float p = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); canberra_kernel_same<<<gridBlock,threadBlock>>>(vg_a,pitch_a,n_a,vg_b,pitch_b,n_b,k,d,pitch_d,p); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { canberra_kernel_same<<<gridBlock,threadBlock>>>(vg_a,pitch_a,n_a,vg_b,pitch_b,n_b,k,d,pitch_d,p); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { canberra_kernel_same<<<gridBlock,threadBlock>>>(vg_a,pitch_a,n_a,vg_b,pitch_b,n_b,k,d,pitch_d,p); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
2ab1c720334782920efa96180b50623b8d1a9852.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * FeatureSpace.cpp * */ /* Copyright (c) 2014 Luke Marcus Biagio Testa All rights reserved. Redistribution and use in source and binary forms are permitted provided that the above copyright notice and this paragraph are duplicated in all such forms and that any documentation, advertising materials, and other materials related to such distribution and use acknowledge that the software was developed by the Luke Marcus Biagio Testa. The name of the Luke Marcus Biagio Testa may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * */ #include "FeatureSpace.h" // ------------------ Constructors ---------------- // Parameter constructor takes in number of centroid FeatureSpace::FeatureSpace( const int K ) : CentroidSize(K), DataSize(0) { jbutil::randgen Rand( std::time(0) ); Centroids = new FeaturePoint[CentroidSize]; // Initialize Centroids at random positions and number of points per cluster to 0 for(int i = 0; i < CentroidSize; i++) { // Centroid Subset member is the number of points currently allocated to the cluster the centroid represents //FeaturePoint v( Rand.fval(LIMIT_L, LIMIT_U), Rand.fval(LIMIT_L, LIMIT_U), 0 ); FeaturePoint v( i, i, 0 ); Centroids[i] = v; } }; // ------------------ Operator Overloads ------------------- // Store data from file in X Y row format into point cloud std::istream& operator >>(std::istream& in, FeatureSpace& cloud) { cloud.DataSize++; // Create temporary point and initialize with 2D data from file FeaturePoint temp; in >> temp; // Copy 2D data into point cloud. Initialize point's cluster index to 0. cloud.PointCloud.push_back(temp); return in; } // ------------------ Destructors ----------------- FeatureSpace::~FeatureSpace() { //std::cout << "[DELETING] FeatureSpace" << std::endl; delete [] Centroids; } // ----------------- Member Functions -------------- // Print Data Point Cloud to Console Output void FeatureSpace::printData() { std::cout << "***** Printing Feature Space ******" << std::endl; std::cout << "DataSize: " << DataSize << std::endl; for(int i = 0; i < DataSize; i++) PointCloud[i].print(); } // Print Centroids to Console Output void FeatureSpace::printCentroids() { std::cout << "***** Printing Centroids *****" << std::endl; std::cout << "Number of Centroids: " << CentroidSize << std::endl; for(int i = 0; i < CentroidSize; i++) Centroids[i].print(); } FeaturePoint* FeatureSpace::getCentroids() { return Centroids; } // Get current Data points with cluster information std::vector<FeaturePoint> FeatureSpace::getDataPoints() { return PointCloud; } // Find nearest Centroid to data point. 
Store centroid's index against data point // Static Geometry members slice base class of FeaturePoint for processing //__global__ void SnapSubsets(double* ptr_centroids, double* ptr_data, double* ptr_cluster, long int DataSize, short int CentroidSize) __global__ void SnapSubsets(double* ptr_centroids, double* ptr_data, double* ptr_cluster, int DataSize, short int CentroidSize) { // Store previous centroid distance to point double distance, newDistance; long int id = ( threadIdx.x + (blockDim.x * blockIdx.x) ); // initialize distance as maximum value of double distance = DBL_MAX; // Protect against Threads accessing inexistent point if( id < DataSize) { for( int j = 0; j < CentroidSize; j++) { //a = (ptr_centroids[j*2] - ptr_data[(id*2)] ); //b = (ptr_centroids[(j*2) + 1] - ptr_data[(id*2) + 1] ); //newDistance = a*a + b*b; newDistance = ((ptr_centroids[j*2] - ptr_data[(id*2)] )*(ptr_centroids[j*2] - ptr_data[(id*2)] )) + ((ptr_centroids[(j*2) + 1] - ptr_data[(id*2) + 1] )*(ptr_centroids[(j*2) + 1] - ptr_data[(id*2) + 1] )); // If new distance found < previous distance, assign data point to cluster index j if( distance > newDistance ) { distance = newDistance; ptr_cluster[id] = j; } } } //__syncthreads(); }; // Partition feature space into clusters and recursively converge centroids until local minimum achieved int FeatureSpace::ClusterSearch() { double currEuclidean = 0, prevEuclidean; int iterations = 0; double* device_ptr_centroids_xy, *device_ptr_data_xy, *host_ptr_centroids_xy, *host_ptr_data_xy, *host_ptr_point_cluster, *device_ptr_point_cluster; // Cannot use member functions on GPU. Rearrange Centroid/Feature data into 1D float arrays host_ptr_data_xy = new double[DataSize*2]; host_ptr_point_cluster = new double[DataSize]; host_ptr_centroids_xy = new double[CentroidSize*2]; for(int i=0; i< DataSize; i++) { host_ptr_data_xy[2*i] = PointCloud[i].contents().first; host_ptr_data_xy[(2*i) + 1] = PointCloud[i].contents().second; host_ptr_point_cluster[i] = 0; } // Prepare GPU global memory for Feature Space. Success( hipMalloc( &device_ptr_centroids_xy, sizeof(double) * 2 * CentroidSize ) ); Success( hipMalloc( &device_ptr_data_xy, sizeof(double) * 2 * DataSize ) ); Success( hipMalloc( &device_ptr_point_cluster, sizeof(double) * DataSize ) ); // Feature Point [X Y] is constant. Copy Once to GPU once. Not used in constant memory as centroid [X Y] addresses are read more than any feature point address Success( hipMemcpy(device_ptr_data_xy, host_ptr_data_xy, sizeof(double) * 2 * DataSize, hipMemcpyHostToDevice) ); // Copy the cluster elements per point once. Success( hipMemcpy(device_ptr_point_cluster, host_ptr_point_cluster, sizeof(double) * DataSize, hipMemcpyHostToDevice) ); // Stores point data on constant memory. Requires 32 bytes per thread: 32*1024 = 32768 bytes //hipFuncSetCacheConfig(SnapSubsets, 2); // while points have not converged do { iterations++; // store cluster function output prevEuclidean = currEuclidean; currEuclidean = 0; // ------------------------------------ Parallel SnapSubsets ----------------------------------------- // Format updated centroid information and copy to GPU for(int i = 0; i < CentroidSize; i++) { host_ptr_centroids_xy[2*i] = Centroids[i].contents().first; host_ptr_centroids_xy[(2*i) + 1] = Centroids[i].contents().second; }; // Load new centroid positions to GPU Success( hipMemcpy(device_ptr_centroids_xy, host_ptr_centroids_xy, sizeof(double) * 2 * CentroidSize, hipMemcpyHostToDevice) ); // Only 52 bytes needed of register space for this function. 
No shared memory access // Launch maximum number of threads hipLaunchKernelGGL(( SnapSubsets) , dim3((DataSize/THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, 0, device_ptr_centroids_xy, device_ptr_data_xy, device_ptr_point_cluster, DataSize, CentroidSize); //hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); // Extract Cluster each point assigned. Allocated on GPU with centroid distance measurements (1 way transfer) Success( hipMemcpy(host_ptr_point_cluster, device_ptr_point_cluster, sizeof(double) * DataSize, hipMemcpyDeviceToHost) ); for(int i=0; i<DataSize; i++) PointCloud[i].Cluster() = host_ptr_point_cluster[i]; // -------------------------------Continue As Per Usual-------------------------------------------------------- // Converge centroid to cluster mean UpdateCentroids(); // Find total sum of euclidean distances for( int i = 0; i < DataSize; i++) currEuclidean += Euclidean(Centroids[ PointCloud[i].Cluster() ],PointCloud[i] ); } while( currEuclidean != prevEuclidean ); // Clear GPU memory. Centroid and Point data is unchanged. hipFree(device_ptr_centroids_xy); hipFree(device_ptr_point_cluster); hipFree(device_ptr_data_xy); return iterations; }; void FeatureSpace::Success(hipError_t err) const { if(err != hipSuccess) std::cout << "Error: " << err << std::endl; }
2ab1c720334782920efa96180b50623b8d1a9852.cu
/* * FeatureSpace.cpp * */ /* Copyright (c) 2014 Luke Marcus Biagio Testa All rights reserved. Redistribution and use in source and binary forms are permitted provided that the above copyright notice and this paragraph are duplicated in all such forms and that any documentation, advertising materials, and other materials related to such distribution and use acknowledge that the software was developed by the Luke Marcus Biagio Testa. The name of the Luke Marcus Biagio Testa may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * */ #include "FeatureSpace.h" // ------------------ Constructors ---------------- // Parameter constructor takes in number of centroid FeatureSpace::FeatureSpace( const int K ) : CentroidSize(K), DataSize(0) { jbutil::randgen Rand( std::time(0) ); Centroids = new FeaturePoint[CentroidSize]; // Initialize Centroids at random positions and number of points per cluster to 0 for(int i = 0; i < CentroidSize; i++) { // Centroid Subset member is the number of points currently allocated to the cluster the centroid represents //FeaturePoint v( Rand.fval(LIMIT_L, LIMIT_U), Rand.fval(LIMIT_L, LIMIT_U), 0 ); FeaturePoint v( i, i, 0 ); Centroids[i] = v; } }; // ------------------ Operator Overloads ------------------- // Store data from file in X Y row format into point cloud std::istream& operator >>(std::istream& in, FeatureSpace& cloud) { cloud.DataSize++; // Create temporary point and initialize with 2D data from file FeaturePoint temp; in >> temp; // Copy 2D data into point cloud. Initialize point's cluster index to 0. cloud.PointCloud.push_back(temp); return in; } // ------------------ Destructors ----------------- FeatureSpace::~FeatureSpace() { //std::cout << "[DELETING] FeatureSpace" << std::endl; delete [] Centroids; } // ----------------- Member Functions -------------- // Print Data Point Cloud to Console Output void FeatureSpace::printData() { std::cout << "***** Printing Feature Space ******" << std::endl; std::cout << "DataSize: " << DataSize << std::endl; for(int i = 0; i < DataSize; i++) PointCloud[i].print(); } // Print Centroids to Console Output void FeatureSpace::printCentroids() { std::cout << "***** Printing Centroids *****" << std::endl; std::cout << "Number of Centroids: " << CentroidSize << std::endl; for(int i = 0; i < CentroidSize; i++) Centroids[i].print(); } FeaturePoint* FeatureSpace::getCentroids() { return Centroids; } // Get current Data points with cluster information std::vector<FeaturePoint> FeatureSpace::getDataPoints() { return PointCloud; } // Find nearest Centroid to data point. 
Store centroid's index against data point // Static Geometry members slice base class of FeaturePoint for processing //__global__ void SnapSubsets(double* ptr_centroids, double* ptr_data, double* ptr_cluster, long int DataSize, short int CentroidSize) __global__ void SnapSubsets(double* ptr_centroids, double* ptr_data, double* ptr_cluster, int DataSize, short int CentroidSize) { // Store previous centroid distance to point double distance, newDistance; long int id = ( threadIdx.x + (blockDim.x * blockIdx.x) ); // initialize distance as maximum value of double distance = DBL_MAX; // Protect against Threads accessing inexistent point if( id < DataSize) { for( int j = 0; j < CentroidSize; j++) { //a = (ptr_centroids[j*2] - ptr_data[(id*2)] ); //b = (ptr_centroids[(j*2) + 1] - ptr_data[(id*2) + 1] ); //newDistance = a*a + b*b; newDistance = ((ptr_centroids[j*2] - ptr_data[(id*2)] )*(ptr_centroids[j*2] - ptr_data[(id*2)] )) + ((ptr_centroids[(j*2) + 1] - ptr_data[(id*2) + 1] )*(ptr_centroids[(j*2) + 1] - ptr_data[(id*2) + 1] )); // If new distance found < previous distance, assign data point to cluster index j if( distance > newDistance ) { distance = newDistance; ptr_cluster[id] = j; } } } //__syncthreads(); }; // Partition feature space into clusters and recursively converge centroids until local minimum achieved int FeatureSpace::ClusterSearch() { double currEuclidean = 0, prevEuclidean; int iterations = 0; double* device_ptr_centroids_xy, *device_ptr_data_xy, *host_ptr_centroids_xy, *host_ptr_data_xy, *host_ptr_point_cluster, *device_ptr_point_cluster; // Cannot use member functions on GPU. Rearrange Centroid/Feature data into 1D float arrays host_ptr_data_xy = new double[DataSize*2]; host_ptr_point_cluster = new double[DataSize]; host_ptr_centroids_xy = new double[CentroidSize*2]; for(int i=0; i< DataSize; i++) { host_ptr_data_xy[2*i] = PointCloud[i].contents().first; host_ptr_data_xy[(2*i) + 1] = PointCloud[i].contents().second; host_ptr_point_cluster[i] = 0; } // Prepare GPU global memory for Feature Space. Success( cudaMalloc( &device_ptr_centroids_xy, sizeof(double) * 2 * CentroidSize ) ); Success( cudaMalloc( &device_ptr_data_xy, sizeof(double) * 2 * DataSize ) ); Success( cudaMalloc( &device_ptr_point_cluster, sizeof(double) * DataSize ) ); // Feature Point [X Y] is constant. Copy Once to GPU once. Not used in constant memory as centroid [X Y] addresses are read more than any feature point address Success( cudaMemcpy(device_ptr_data_xy, host_ptr_data_xy, sizeof(double) * 2 * DataSize, cudaMemcpyHostToDevice) ); // Copy the cluster elements per point once. Success( cudaMemcpy(device_ptr_point_cluster, host_ptr_point_cluster, sizeof(double) * DataSize, cudaMemcpyHostToDevice) ); // Stores point data on constant memory. 
Requires 32 bytes per thread: 32*1024 = 32768 bytes //cudaFuncSetCacheConfig(SnapSubsets, 2); // while points have not converged do { iterations++; // store cluster function output prevEuclidean = currEuclidean; currEuclidean = 0; // ------------------------------------ Parallel SnapSubsets ----------------------------------------- // Format updated centroid information and copy to GPU for(int i = 0; i < CentroidSize; i++) { host_ptr_centroids_xy[2*i] = Centroids[i].contents().first; host_ptr_centroids_xy[(2*i) + 1] = Centroids[i].contents().second; }; // Load new centroid positions to GPU Success( cudaMemcpy(device_ptr_centroids_xy, host_ptr_centroids_xy, sizeof(double) * 2 * CentroidSize, cudaMemcpyHostToDevice) ); // Only 52 bytes needed of register space for this function. No shared memory access // Launch maximum number of threads SnapSubsets <<< (DataSize/THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(device_ptr_centroids_xy, device_ptr_data_xy, device_ptr_point_cluster, DataSize, CentroidSize); //cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); // Extract Cluster each point assigned. Allocated on GPU with centroid distance measurements (1 way transfer) Success( cudaMemcpy(host_ptr_point_cluster, device_ptr_point_cluster, sizeof(double) * DataSize, cudaMemcpyDeviceToHost) ); for(int i=0; i<DataSize; i++) PointCloud[i].Cluster() = host_ptr_point_cluster[i]; // -------------------------------Continue As Per Usual-------------------------------------------------------- // Converge centroid to cluster mean UpdateCentroids(); // Find total sum of euclidean distances for( int i = 0; i < DataSize; i++) currEuclidean += Euclidean(Centroids[ PointCloud[i].Cluster() ],PointCloud[i] ); } while( currEuclidean != prevEuclidean ); // Clear GPU memory. Centroid and Point data is unchanged. cudaFree(device_ptr_centroids_xy); cudaFree(device_ptr_point_cluster); cudaFree(device_ptr_data_xy); return iterations; }; void FeatureSpace::Success(cudaError_t err) const { if(err != cudaSuccess) std::cout << "Error: " << err << std::endl; }
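FeatureSpace::Success() above prints only the numeric cudaError_t value. A hypothetical free-function variant (CheckCuda is not part of the original code) that also prints cudaGetErrorString makes failed calls easier to diagnose; it can wrap any runtime call the same way Success() is used inside ClusterSearch().

#include <iostream>
#include <cuda_runtime.h>

// Hypothetical helper, shown as a sketch only: report the human-readable
// error string in addition to the numeric code.
static void CheckCuda(cudaError_t err)
{
    if (err != cudaSuccess)
        std::cout << "CUDA error " << err << ": " << cudaGetErrorString(err) << std::endl;
}

// Example use, mirroring how Success() wraps calls above:
//   CheckCuda( cudaMemcpy(host_ptr_point_cluster, device_ptr_point_cluster,
//                         sizeof(double) * DataSize, cudaMemcpyDeviceToHost) );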
e83dc1ede1aea6fa3f1adf9785991e1fb0232c94.hip
// !!! This is a file automatically generated by hipify!!! #include "cuda_device.h" // Beginning of GPU Architecture definitions inline int _ConvertSMVer2Cores(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine // the # of cores per SM typedef struct { int SM; // 0xMm (hexidecimal notation), M = SM Major version, // and m = SM minor version int Cores; } sSMtoCores; sSMtoCores nGpuArchCoresPerSM[] = { {0x30, 192}, // Kepler Generation (SM 3.0) GK10x class {0x32, 192}, // Kepler Generation (SM 3.2) GK10x class {0x35, 192}, // Kepler Generation (SM 3.5) GK11x class {0x37, 192}, // Kepler Generation (SM 3.7) GK21x class {0x50, 128}, // Maxwell Generation (SM 5.0) GM10x class {0x52, 128}, // Maxwell Generation (SM 5.2) GM20x class {0x53, 128}, // Maxwell Generation (SM 5.3) GM20x class {0x60, 64}, // Pascal Generation (SM 6.0) GP100 class {0x61, 128}, // Pascal Generation (SM 6.1) GP10x class {0x62, 128}, // Pascal Generation (SM 6.2) GP10x class {0x70, 64}, // Volta Generation (SM 7.0) GV100 class {0x72, 64}, // Volta Generation (SM 7.2) GV11b class {-1, -1}}; int index = 0; while (nGpuArchCoresPerSM[index].SM != -1) { if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) { return nGpuArchCoresPerSM[index].Cores; } index++; } // If we don't find the values, we default use the previous one // to run properly printf( "MapSMtoCores for SM %d.%d is undefined." " Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index - 1].Cores); return nGpuArchCoresPerSM[index - 1].Cores; } void CheckDevice() { int32_t deviceCount = 0; CUDA_CHECK(hipGetDeviceCount(&deviceCount)); // This function call returns 0 if there are no CUDA capable devices. if (deviceCount == 0) { printf("There are no available device(s) that support CUDA\n"); return; } else { printf("host has:%d devices\n", deviceCount); } int32_t dev, driverVersion = 0, runtimeVersion = 0; for (dev = 0; dev < deviceCount; ++dev) { hipSetDevice(dev); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name); hipDriverGetVersion(&driverVersion); hipRuntimeGetVersion(&runtimeVersion); printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion / 1000, (driverVersion % 100) / 10, runtimeVersion / 1000, (runtimeVersion % 100) / 10); printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor); char msg[256]; snprintf(msg, sizeof(msg), " Total amount of global memory: %.0f MBytes " "(%llu bytes)\n", static_cast<float>(deviceProp.totalGlobalMem / 1048576.0f), (unsigned long long)deviceProp.totalGlobalMem); printf("%s", msg); printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n", deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); printf( " GPU Max Clock rate: %.0f MHz (%0.2f " "GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f); printf(" Memory Clock rate: %.0f Mhz\n", deviceProp.memoryClockRate * 1e-3f); printf(" Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth); if (deviceProp.l2CacheSize) { printf(" L2 Cache Size: %d bytes\n", deviceProp.l2CacheSize); } printf( " Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, " "%d), 3D=(%d, %d, %d)\n", deviceProp.maxTexture1D, deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], 
deviceProp.maxTexture3D[2]); printf( " Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n", deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]); printf( " Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d " "layers\n", deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]); printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: %lu bytes\n", deviceProp.memPitch); printf(" Texture alignment: %lu bytes\n", deviceProp.textureAlignment); printf( " Concurrent copy and kernel execution: %s with %d copy " "engine(s)\n", (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount); printf(" Run time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No"); printf(" Integrated GPU sharing Host Memory: %s\n", deviceProp.integrated ? "Yes" : "No"); printf(" Support host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No"); printf(" Alignment requirement for Surfaces: %s\n", deviceProp.surfaceAlignment ? "Yes" : "No"); printf(" Device has ECC support: %s\n", deviceProp.ECCEnabled ? "Enabled" : "Disabled"); printf(" Device supports Unified Addressing (UVA): %s\n", deviceProp.unifiedAddressing ? "Yes" : "No"); printf(" Device supports Compute Preemption: %s\n", deviceProp.computePreemptionSupported ? "Yes" : "No"); printf(" Supports Cooperative Kernel Launch: %s\n", deviceProp.cooperativeLaunch ? "Yes" : "No"); printf(" Supports MultiDevice Co-op Kernel Launch: %s\n", deviceProp.cooperativeMultiDeviceLaunch ? "Yes" : "No"); printf(" Device PCI Domain ID / Bus ID / location ID: %d / %d / %d\n", deviceProp.pciDomainID, deviceProp.pciBusID, deviceProp.pciDeviceID); const char *sComputeMode[] = { "Default (multiple host threads can use ::hipSetDevice() with device " "simultaneously)", "Exclusive (only one host thread in one process is able to use " "::hipSetDevice() with this device)", "Prohibited (no host thread can use ::hipSetDevice() with this " "device)", "Exclusive Process (many threads in one process is able to use " "::hipSetDevice() with this device)", "Unknown", NULL}; printf(" Compute Mode:\n"); printf(" < %s >\n\n", sComputeMode[deviceProp.computeMode]); } }
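As a sketch (not from the original file), the memoryClockRate and memoryBusWidth fields that CheckDevice already prints can be combined into an estimate of theoretical peak memory bandwidth. The factor of 2 assumes double-data-rate memory, so the result is an approximation; PrintPeakBandwidth is a hypothetical helper name.

#include <cstdio>
#include <hip/hip_runtime.h>

// Sketch: estimated peak memory bandwidth in GB/s.
// memoryClockRate is reported in kHz and memoryBusWidth in bits.
void PrintPeakBandwidth(int dev)
{
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, dev);
    double gbps = 2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8.0) / 1.0e6;
    printf("  Theoretical peak memory bandwidth:            %.1f GB/s\n", gbps);
}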
e83dc1ede1aea6fa3f1adf9785991e1fb0232c94.cu
#include "cuda_device.h" // Beginning of GPU Architecture definitions inline int _ConvertSMVer2Cores(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine // the # of cores per SM typedef struct { int SM; // 0xMm (hexidecimal notation), M = SM Major version, // and m = SM minor version int Cores; } sSMtoCores; sSMtoCores nGpuArchCoresPerSM[] = { {0x30, 192}, // Kepler Generation (SM 3.0) GK10x class {0x32, 192}, // Kepler Generation (SM 3.2) GK10x class {0x35, 192}, // Kepler Generation (SM 3.5) GK11x class {0x37, 192}, // Kepler Generation (SM 3.7) GK21x class {0x50, 128}, // Maxwell Generation (SM 5.0) GM10x class {0x52, 128}, // Maxwell Generation (SM 5.2) GM20x class {0x53, 128}, // Maxwell Generation (SM 5.3) GM20x class {0x60, 64}, // Pascal Generation (SM 6.0) GP100 class {0x61, 128}, // Pascal Generation (SM 6.1) GP10x class {0x62, 128}, // Pascal Generation (SM 6.2) GP10x class {0x70, 64}, // Volta Generation (SM 7.0) GV100 class {0x72, 64}, // Volta Generation (SM 7.2) GV11b class {-1, -1}}; int index = 0; while (nGpuArchCoresPerSM[index].SM != -1) { if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) { return nGpuArchCoresPerSM[index].Cores; } index++; } // If we don't find the values, we default use the previous one // to run properly printf( "MapSMtoCores for SM %d.%d is undefined." " Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index - 1].Cores); return nGpuArchCoresPerSM[index - 1].Cores; } void CheckDevice() { int32_t deviceCount = 0; CUDA_CHECK(cudaGetDeviceCount(&deviceCount)); // This function call returns 0 if there are no CUDA capable devices. if (deviceCount == 0) { printf("There are no available device(s) that support CUDA\n"); return; } else { printf("host has:%d devices\n", deviceCount); } int32_t dev, driverVersion = 0, runtimeVersion = 0; for (dev = 0; dev < deviceCount; ++dev) { cudaSetDevice(dev); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name); cudaDriverGetVersion(&driverVersion); cudaRuntimeGetVersion(&runtimeVersion); printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion / 1000, (driverVersion % 100) / 10, runtimeVersion / 1000, (runtimeVersion % 100) / 10); printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor); char msg[256]; snprintf(msg, sizeof(msg), " Total amount of global memory: %.0f MBytes " "(%llu bytes)\n", static_cast<float>(deviceProp.totalGlobalMem / 1048576.0f), (unsigned long long)deviceProp.totalGlobalMem); printf("%s", msg); printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n", deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); printf( " GPU Max Clock rate: %.0f MHz (%0.2f " "GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f); printf(" Memory Clock rate: %.0f Mhz\n", deviceProp.memoryClockRate * 1e-3f); printf(" Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth); if (deviceProp.l2CacheSize) { printf(" L2 Cache Size: %d bytes\n", deviceProp.l2CacheSize); } printf( " Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, " "%d), 3D=(%d, %d, %d)\n", deviceProp.maxTexture1D, deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]); printf( " Maximum Layered 1D Texture Size, (num) 
layers 1D=(%d), %d layers\n", deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]); printf( " Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d " "layers\n", deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]); printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: %lu bytes\n", deviceProp.memPitch); printf(" Texture alignment: %lu bytes\n", deviceProp.textureAlignment); printf( " Concurrent copy and kernel execution: %s with %d copy " "engine(s)\n", (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount); printf(" Run time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No"); printf(" Integrated GPU sharing Host Memory: %s\n", deviceProp.integrated ? "Yes" : "No"); printf(" Support host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No"); printf(" Alignment requirement for Surfaces: %s\n", deviceProp.surfaceAlignment ? "Yes" : "No"); printf(" Device has ECC support: %s\n", deviceProp.ECCEnabled ? "Enabled" : "Disabled"); printf(" Device supports Unified Addressing (UVA): %s\n", deviceProp.unifiedAddressing ? "Yes" : "No"); printf(" Device supports Compute Preemption: %s\n", deviceProp.computePreemptionSupported ? "Yes" : "No"); printf(" Supports Cooperative Kernel Launch: %s\n", deviceProp.cooperativeLaunch ? "Yes" : "No"); printf(" Supports MultiDevice Co-op Kernel Launch: %s\n", deviceProp.cooperativeMultiDeviceLaunch ? "Yes" : "No"); printf(" Device PCI Domain ID / Bus ID / location ID: %d / %d / %d\n", deviceProp.pciDomainID, deviceProp.pciBusID, deviceProp.pciDeviceID); const char *sComputeMode[] = { "Default (multiple host threads can use ::cudaSetDevice() with device " "simultaneously)", "Exclusive (only one host thread in one process is able to use " "::cudaSetDevice() with this device)", "Prohibited (no host thread can use ::cudaSetDevice() with this " "device)", "Exclusive Process (many threads in one process is able to use " "::cudaSetDevice() with this device)", "Unknown", NULL}; printf(" Compute Mode:\n"); printf(" < %s >\n\n", sComputeMode[deviceProp.computeMode]); } }
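A companion sketch for the CUDA version (PrintPeakGflops is hypothetical, not in the original file): the core count from _ConvertSMVer2Cores above, multiplied by the clock rate and by an assumed 2 FLOPs per core per cycle (one FMA), gives a rule-of-thumb FP32 peak alongside the per-device summary that CheckDevice prints.

#include <cstdio>
#include <cuda_runtime.h>

// Sketch: rule-of-thumb FP32 peak, assuming 2 FLOPs (one FMA) per core per cycle.
// Relies on _ConvertSMVer2Cores defined above; clockRate is reported in kHz.
void PrintPeakGflops(int dev)
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, dev);
    int cores = _ConvertSMVer2Cores(prop.major, prop.minor) * prop.multiProcessorCount;
    double gflops = 2.0 * cores * prop.clockRate * 1.0e-6;
    printf("  Estimated peak FP32 throughput:                %.0f GFLOP/s\n", gflops);
}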
b7b1345a52cd53c976b824b585ebd70e40d7baa7.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include "testlayers.h" #include <NDArray.h> #include <NDArrayFactory.h> #include <Context.h> #include <Node.h> #include <graph/Variable.h> #include <graph/VariableSpace.h> #include <execution/LaunchContext.h> #include <specials_cuda.h> #include <TAD.h> #include <ops/declarable/CustomOperations.h> #include <hip/hip_runtime.h> using namespace nd4j; using namespace nd4j::graph; class NDArrayCudaBasicsTests : public testing::Test { public: }; ////////////////////////////////////////////////////////////////////////// static hipError_t allocateDeviceMem(LaunchContext& lc, std::vector<void*>& devicePtrs, const std::vector<std::pair<void*,size_t>>& hostData) { if(devicePtrs.size() != hostData.size()) throw std::invalid_argument("prepareDataForCuda: two input sts::vectors should same sizes !"); hipError_t cudaResult; void* reductionPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); if(cudaResult != 0) return cudaResult; int* allocationPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); if(cudaResult != 0) return cudaResult; lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); hipStream_t stream = *lc.getCudaStream(); for(int i = 0; i < devicePtrs.size(); ++i) { cudaResult = hipMalloc(reinterpret_cast<void **>(&devicePtrs[i]), hostData[i].second); if(cudaResult != 0) return cudaResult; hipMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, hipMemcpyHostToDevice, stream); } return cudaResult; } TEST_F(NDArrayCudaBasicsTests, Test_Registration_1) { auto x = NDArrayFactory::create<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); } TEST_F(NDArrayCudaBasicsTests, Test_Registration_2) { auto x = NDArrayFactory::create<int>('c', {5}); auto y = NDArrayFactory::create<int>('c', {5}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); } TEST_F(NDArrayCudaBasicsTests, Test_Registration_3) { auto x = NDArrayFactory::create<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); NDArray::registerSpecialUse({&x}, {&y}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); ASSERT_TRUE(y.isActualOnDeviceSide()); ASSERT_FALSE(y.isActualOnHostSide()); } TEST_F(NDArrayCudaBasicsTests, Test_Registration_01) { auto x = NDArrayFactory::create_<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create_<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x->isActualOnDeviceSide()); 
ASSERT_FALSE(x->isActualOnHostSide()); delete x; delete y; } TEST_F(NDArrayCudaBasicsTests, Test_Registration_02) { auto x = NDArrayFactory::create_<int>('c', {5}); auto y = NDArrayFactory::create_<int>('c', {5}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_FALSE(x->isActualOnHostSide()); delete x; delete y; } TEST_F(NDArrayCudaBasicsTests, Test_Registration_03) { auto x = NDArrayFactory::create_<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create_<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_FALSE(x->isActualOnHostSide()); NDArray::registerSpecialUse({y}, {x}); x->applyTransform(transform::Neg, *y); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //y->syncToHost(); // y->printBuffer("Negatives"); delete x; delete y; } TEST_F(NDArrayCudaBasicsTests, Test_Cosine_1) { auto x = NDArrayFactory::create_<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create_<double>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_FALSE(x->isActualOnHostSide()); NDArray::registerSpecialUse({y}, {x}); x->applyTransform(transform::Cosine, *y); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //y->syncToHost(); delete x; delete y; } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_1) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', { 5 }, {10, 10, 10, 10, 10}); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(hipStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(hipStream_t)); hipError_t dZ = hipStreamCreate(reinterpret_cast<hipStream_t *>(&nativeStream)); auto stream = reinterpret_cast<hipStream_t *>(&nativeStream); //hipMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), hipMemcpyHostToDevice, *stream); //hipMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), hipMemcpyHostToDevice, *stream); LaunchContext lc(stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); z.tickWriteDevice(); auto res = hipStreamSynchronize(*stream); ASSERT_EQ(0, res); for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_2) { // allocating host-side arrays NDArray x('c', { 5 }, { 1, 2, 3, 4, 5}); 
NDArray y('c', { 5 }, { 1, 2, 3, 4, 5}); NDArray z('c', { 5 }, nd4j::DataType::DOUBLE); NDArray exp('c', { 5 }, { 2, 4, 6, 8, 10 }); Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(hipStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(hipStream_t)); hipError_t dZ = hipStreamCreate(reinterpret_cast<hipStream_t *>(&nativeStream)); auto stream = reinterpret_cast<hipStream_t *>(&nativeStream); LaunchContext lc(stream, *stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); auto res = hipStreamSynchronize(*stream); ASSERT_EQ(0, res); for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_3) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', { 5 }, {10, 10, 10, 10, 10}); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(hipStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(hipStream_t)); hipError_t dZ = hipStreamCreate(reinterpret_cast<hipStream_t *>(&nativeStream)); auto stream = reinterpret_cast<hipStream_t *>(&nativeStream); //hipMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), hipMemcpyHostToDevice, *stream); //hipMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), hipMemcpyHostToDevice, *stream); LaunchContext lc(stream, *stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); z.tickWriteDevice(); auto res = hipStreamSynchronize(*stream); ASSERT_EQ(0, res); //double* localBuffer = ; z.syncToHost(); hipMemcpy(z.buffer(), z.specialBuffer(), z.lengthOf() * z.sizeOfT(), hipMemcpyDeviceToHost); res = hipStreamSynchronize(*stream); z.tickWriteHost(); ASSERT_EQ(0, res); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_4) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); 
// making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x.applyPairwiseTransform(pairwise::Add, y, z); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_5) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x += y; //x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); x.syncToHost(); //y.printBuffer("3Y = "); //z.printBuffer("3Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < x.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_6) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>(2); //.'c', { 5 }, { 1, 2, 3, 4, 5}); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 3, 4, 5, 6, 7 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x += y; //x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); x.syncToHost(); // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < x.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_7) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); //auto y = NDArrayFactory::create<double>(2); //.'c', { 5 }, { 1, 2, 3, 4, 5}); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 3, 4, 5, 6, 7 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), 
x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x += 2.; //x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); x.syncToHost(); // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < x.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestMultiply_1) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 1, 4, 9, 16, 25 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x.applyPairwiseTransform(pairwise::Multiply, y, z); // x.printBuffer("3X = "); // y.printBuffer("3Y = "); // z.printBuffer("3Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestMultiply_2) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); NDArray z('c', { 5 }, nd4j::DataType::DOUBLE); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 1, 4, 9, 16, 25 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x.applyPairwiseTransform(pairwise::Multiply, y, z); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestMultiply_3) { // allocating host-side arrays NDArray x('c', { 5 }, { 1, 2, 3, 4, 5}, nd4j::DataType::DOUBLE); NDArray y('c', { 5 }, { 1., 2., 3., 4., 5.}, nd4j::DataType::DOUBLE); auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 1, 4, 9, 16, 25 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); 
//res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x.applyPairwiseTransform(pairwise::Multiply, y, z); //x.printBuffer("23X = "); //y.printBuffer("23Y = "); // z.printBuffer("23Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestMultiply_4) { // allocating host-side arrays NDArray x('c', { 5 }, { 1, 2, 3, 4, 5}, nd4j::DataType::DOUBLE); NDArray y('c', { 5 }, { 1., 2., 3., 4., 5.}, nd4j::DataType::DOUBLE); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 1, 4, 9, 16, 25 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); //x.printBuffer("23X = "); //y.printBuffer("23Y = "); x *= y; //x.tickWriteDevice(); // x.printBuffer("33Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < x.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestPrimitiveNeg_01) { // allocating host-side arrays auto x = NDArrayFactory::create<int>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<int>('c', { 5 }, { 1, 2, 3, 4, 5}); auto exp = NDArrayFactory::create<int>('c', { 5 }, { -1, -2, -3, -4, -5 }); auto stream = x.getContext()->getCudaStream();//reinterpret_cast<hipStream_t *>(&nativeStream); NativeOpExecutioner::execTransformSame(x.getContext(), transform::Neg, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, nullptr, nullptr); auto res = hipStreamSynchronize(*stream); ASSERT_EQ(0, res); y.tickWriteDevice(); // x.printBuffer("X = "); // y.printBuffer("Y = "); for (int e = 0; e < y.lengthOf(); e++) { ASSERT_NEAR(exp.e<int>(e), y.e<int>(e), 1e-5); } } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveNeg_2) { auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); x.applyTransform(transform::Neg, y); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //auto res = hipStreamSynchronize(*y.getContext()->getCudaStream()); //ASSERT_EQ(0, res); // y.printBuffer("Negatives2"); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveSqrt_1) { // strict auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>({1.000000, 1.414214, 1.732051, 2.000000, 2.236068}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); 
x.applyTransform(transform::Sqrt, y); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //auto res = hipStreamSynchronize(*y.getContext()->getCudaStream()); //ASSERT_EQ(0, res); ASSERT_TRUE(y.equalsTo(exp)); //y.printBuffer("SQRT output"); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveAssign_1) { // strict auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); //auto exp = NDArrayFactory::create<double>({1.000000, 1.414214, 1.732051, 2.000000, 2.236068}); //ASSERT_TRUE(x.isActualOnDeviceSide()); //ASSERT_TRUE(x.isActualOnHostSide()); x.applyTransform(transform::Assign, y); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //auto res = hipStreamSynchronize(*y.getContext()->getCudaStream()); //ASSERT_EQ(0, res); // printf("Assigned to another array\n"); // y.printBuffer("OUput"); ASSERT_TRUE(y.equalsTo(x)); //y.syncToHost(); //y.printBuffer("IsMax output"); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_1) { // strict auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>('c', {5}, {0.540302, -0.416147, -0.989992, -0.653644, 0.283662}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); x.applyTransform(transform::Cosine, y); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //auto res = hipStreamSynchronize(*y.getContext()->getCudaStream()); //ASSERT_EQ(0, res); ASSERT_TRUE(exp.isSameShape(y)); ASSERT_TRUE(exp.dataType() == y.dataType()); //y.printBuffer("Cosine2"); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_2) { auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>('c', {5}, {0.540302, -0.416147, -0.989992, -0.653644, 0.283662}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); x.applyTransform(transform::Cosine, y); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //auto res = hipStreamSynchronize(*y.getContext()->getCudaStream()); //ASSERT_EQ(0, res); //exp.syncToHost(); //y.printBuffer("PrimitiveCosine2"); //exp.printBuffer("Primitive Cosine exp"); ASSERT_TRUE(exp.isSameShape(y)); ASSERT_TRUE(exp.dataType() == y.dataType()); //for (int e = 0; e < y.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), y.e<double>(e), 1e-5); //} ASSERT_TRUE(exp.equalsTo(y)); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_3) { auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>({0.540302, -0.416147, -0.989992, -0.653644, 0.283662}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); x.applyTransform(transform::Cosine, y); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); 
//ASSERT_TRUE(y->isActualOnHostSide()); //auto res = hipStreamSynchronize(*y.getContext()->getCudaStream()); //ASSERT_EQ(0, res); //exp.syncToHost(); // y.printBuffer("PrimitiveCosine3"); // exp.printBuffer("Primitive Cosine3 exp"); // y.printShapeInfo("Y shape"); // exp.printShapeInfo("Exp Shape"); ASSERT_TRUE(exp.isSameShape(y)); // // for (int e = 0; e < y.lengthOf(); e++) { // printf("%lf == %lf\n", exp.e<double>(e), y.e<double>(e)); //// ASSERT_NEAR(exp.e<double>(e), y.e<double>(e), 1e-5); // } ASSERT_TRUE(exp.equalsTo(y)); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, TestRawBroadcast_2) { //if (!Environment::getInstance()->isExperimentalBuild()) // return; NDArray x = NDArrayFactory::create<double>('c', {2,3,4}); NDArray y('c', {2,4}, {10,20,30,40,50,60,70,80}, nd4j::DataType::DOUBLE); NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE); // NDArray exp('c', {2,3,4}, {10., 21., 32., 43., 14., 25., 36., 47., 18., 29., 40., 51., 62., 73., 84., 95., 66., 77., 88., 99., 70., 81., 92., 103}, nd4j::DataType::DOUBLE); NDArray exp('c', {2,3,4}, {10., 40., 90., 160., 50., 120., 210., 320., 90., 200., 330., 480., 650., 840., 1050., 1280., 850., 1080., 1330., 1600., 1050., 1320., 1610., 1920.}, nd4j::DataType::DOUBLE); x.linspace(1); x.syncToDevice(); std::vector<int> dimensions = {0,2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execBroadcast(&lc, nd4j::broadcast::Multiply, nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } TEST_F(NDArrayCudaBasicsTests, TestRawBroadcast_3) { //if (!Environment::getInstance()->isExperimentalBuild()) // return; NDArray x('c', {2,3,4}, nd4j::DataType::DOUBLE); NDArray y('c', {2,4}, {10,20,30,40,50,60,70,80}, nd4j::DataType::DOUBLE); NDArray z('c', {2,3,4}, 
{100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE); // NDArray exp('c', {2,3,4}, {10., 21., 32., 43., 14., 25., 36., 47., 18., 29., 40., 51., 62., 73., 84., 95., 66., 77., 88., 99., 70., 81., 92., 103}, nd4j::DataType::DOUBLE); NDArray exp('c', {2,3,4}, {10., 40., 90., 160., 50., 120., 210., 320., 90., 200., 330., 480., 650., 840., 1050., 1280., 850., 1080., 1330., 1600., 1050., 1320., 1610., 1920.}, nd4j::DataType::DOUBLE); x.linspace(1); x.syncToDevice(); std::vector<int> dimensions = {0,2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; //hipStream_t stream; //cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext* pLc = x.getContext();//(&stream); hipStream_t* stream = pLc->getCudaStream(); // allocate required amount of global device memory and copy host data to it // cudaResult = allocateDeviceMem(*pLc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); for(int i = 0; i < devicePtrs.size(); ++i) { cudaResult = hipMalloc(reinterpret_cast<void **>(&devicePtrs[i]), hostData[i].second); ASSERT_EQ(0, cudaResult); hipMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, hipMemcpyHostToDevice, *stream); } NDArray::registerSpecialUse({&z}, {&x, &y}); // call cuda kernel which calculates result NativeOpExecutioner::execBroadcast(pLc, nd4j::broadcast::Multiply, nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); //cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); //z.syncToHost(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); ASSERT_TRUE(exp.equalsTo(z)); // delete cuda stream //cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_1) { // allocating host-side arrays NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE); NDArray y = NDArrayFactory::create<double>(3.); //'c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 3, 6, 9, 12, 15, 18 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), 
shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); x *= y; //x.syncToHost(); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); ASSERT_TRUE(exp.equalsTo(x)); // for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); // } } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_01) { // allocating host-side arrays NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE); NDArray y = NDArrayFactory::create<double>(3.); //'c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); auto z = NDArrayFactory::create<double>('c', { 2, 3 }); auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 3, 6, 9, 12, 15, 18 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); //x.printBuffer("23X = "); //y.printBuffer("23Y = "); x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), y, z);// *= y; // z.printBuffer("53Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); ASSERT_TRUE(exp.equalsTo(z)); // for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // } } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_02) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}); //, nd4j::DataType::DOUBLE); auto y = NDArrayFactory::create<double>('c', {2,3}, {3, 3, 3, 3, 3, 3}); //'c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); auto z = NDArrayFactory::create<double>('c', { 2, 3 }); auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 3, 6, 9, 12, 15, 18 }); //if (x.isActualOnHostSide() && !x.isActualOnDeviceSide()) // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); //x.printBuffer("23X = "); //y.printBuffer("23Y = "); x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), y, z);// *= y; // z.printBuffer("52Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); ASSERT_TRUE(exp.equalsTo(z)); // for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // } } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_002) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}); //, nd4j::DataType::DOUBLE); auto y = NDArrayFactory::create<double>('c', {2, 3}, {2., 3., 3., 3., 3., 3.}); //'c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); auto z = NDArrayFactory::create<double>('c', { 2, 3 }); auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 2, 6, 9, 12, 15, 18 }); //if (x.isActualOnHostSide() && 
!x.isActualOnDeviceSide()) // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); //x.printBuffer("23X = "); //y.printBuffer("23Y = "); x.applyPairwiseTransform(pairwise::Multiply, y, z);// *= y; // z.printBuffer("51Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); ASSERT_TRUE(exp.equalsTo(z)); // for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // } } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestBroadcastRaw_1) { //if (!Environment::getInstance()->isExperimentalBuild()) // return; NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32); NDArray y('c', {3}, {10, 20, 30}, nd4j::DataType::INT64); NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32); NDArray exp('c', {2,3,4}, {10, 11, 12, 13,24, 25, 26, 27,38, 39, 40, 41,22, 23, 24, 25,36, 37, 38, 39,50, 51, 52, 53}, nd4j::DataType::INT32); //real output [10, 11, 12, 13, 4, 5, 6, 7, 28, 29, 30, 31, 22, 23, 24, 25, 16, 17, 18, 19, 40, 41, 42, 43] x.linspace(0); x.syncToDevice(); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(Nd4jLong)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t* stream = x.getContext()->getCudaStream(); LaunchContext* pLc = x.getContext(); // allocate required amount of global device memory and copy host data to it //cudaResult = allocateDeviceMem(*pLc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); for(size_t i = 0; i < devicePtrs.size(); ++i) { cudaResult = hipMalloc(&devicePtrs[i], hostData[i].second); //if(cudaResult != 0) return cudaResult; ASSERT_EQ(cudaResult, 0); hipMemcpy(devicePtrs[i], hostData[i].first, hostData[i].second, hipMemcpyHostToDevice); } // call cuda kernel which calculates result NativeOpExecutioner::execBroadcast(pLc, nd4j::broadcast::Add, nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = hipStreamSynchronize(*stream); ASSERT_EQ(0, cudaResult); // x.printIndexedBuffer(" X"); // y.printIndexedBuffer("+Y"); // z.printBuffer("ADD 
broadcasted output"); // verify results // for (int e = 0; e < z.lengthOf(); e++) // ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream //cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply) { // allocating host-side arrays NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE); NDArray y('c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 2, 6, 12, 8, 15, 24 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); //x.printBuffer("23X = "); //y.printBuffer("23Y = "); x *= y; // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); //for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); //} } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_2) { // allocating host-side arrays NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE); NDArray y('c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 11,12, 13,14, 15, 16 }); auto expZ = NDArrayFactory::create<double>('c', { 2, 3 }, { 2, 6, 12, 8, 15, 24 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); //x.printBuffer("23X = "); //y.printBuffer("23Y = "); //void NDArray::applyTrueBroadcast(nd4j::BroadcastOpsTuple op, const NDArray* other, NDArray* target, const bool checkTargetShape, ExtraArguments *extraArgs) x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), y, exp); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); //for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); //} ASSERT_TRUE(exp.equalsTo(expZ)); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestReduceSum_1) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>(15); auto exp = NDArrayFactory::create<double>(15); auto stream = x.getContext()->getCudaStream();//reinterpret_cast<hipStream_t *>(&nativeStream); NativeOpExecutioner::execReduceSameScalar(x.getContext(), reduce::Sum, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo()); auto res = 
hipStreamSynchronize(*stream); ASSERT_EQ(0, res); y.syncToHost(); ASSERT_NEAR(y.e<double>(0), 15, 1e-5); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestDup1) { NDArray array('c', {2,3}, {1,2,3,4,5,6}); auto arrC = array.dup('c'); auto arrF = array.dup('f'); // arrC->printBuffer("arrC"); // arrF->printBuffer("arrF"); //arrC->printShapeInfo("C shape"); //arrF->printShapeInfo("F shape"); ASSERT_TRUE(array.equalsTo(arrF)); ASSERT_TRUE(array.equalsTo(arrC)); ASSERT_TRUE(arrF.equalsTo(arrC)); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, equalsTo_1) { NDArray x('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE); NDArray y('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE); ASSERT_TRUE(x.equalsTo(y)); x.permutei({1,0}); y.permutei({1,0}); ASSERT_TRUE(x.equalsTo(y)); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, equalsTo_2) { NDArray x('c', {2,5}, {1,2,3,4,5,6,7,8,10,10}, nd4j::DataType::DOUBLE); NDArray y('c', {2,5}, {1,2,5,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE); ASSERT_FALSE(x.equalsTo(y)); x.permutei({1,0}); y.permutei({1,0}); ASSERT_FALSE(x.equalsTo(y)); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, equalsTo_3) { NDArray x('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE); NDArray y('c', {2,5}, {1.f,2.f,3.f,4.f,5.f,6.f,7.f,8.f,9.f,10.f}, nd4j::DataType::FLOAT32); ASSERT_FALSE(x.equalsTo(y)); x.permutei({1,0}); y.permutei({1,0}); ASSERT_FALSE(x.equalsTo(y)); } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyReduce3_1) { NDArray x('c', {2,3,4}, {-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::INT32); NDArray x2('c', {2,3,4}, {-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::INT32); NDArray y('c', {2,3,4}, {-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2,3,-4,5}, nd4j::DataType::INT32); NDArray k('c', {2,3}, {-2,3,-4,5,-2,3}, nd4j::DataType::INT32); NDArray k2('c', {3,2}, {-2,3,-4,5,-2,3}, nd4j::DataType::INT32); NDArray exp1('c', {3}, {4.f, 20.f, 36.f}, nd4j::DataType::FLOAT32); NDArray exp2('c', {2,3}, {-10.f, -2.f, 6.f,14.f, 22.f, 30.f}, nd4j::DataType::FLOAT32); NDArray exp3('c', {4}, {38.f, 41.f, 44.f, 47.f}, nd4j::DataType::FLOAT32); NDArray exp4('c', {4}, {114.f, 117.f, 120.f, 123.f}, nd4j::DataType::FLOAT32); NDArray z = x.applyReduce3(nd4j::reduce3::Dot, y, {0,2}); ASSERT_TRUE(z.equalsTo(&exp1)); z = x.applyReduce3(nd4j::reduce3::Dot, k, {0,1}); ASSERT_TRUE(z.equalsTo(&exp3)); x.permutei({0,2,1}); y.permutei({0,2,1}); z = y.applyReduce3(nd4j::reduce3::Dot, x, {1}); ASSERT_TRUE(z.equalsTo(&exp2)); x2.permutei({1,0,2}); z = x2.applyReduce3(nd4j::reduce3::Dot, k2, {0,1}); ASSERT_TRUE(z.equalsTo(&exp4)); } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyReduce3_2) { NDArray x('c', {2,3,4}, {-10,-9,-8.5,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::DOUBLE); NDArray x2('c', {2,3,4}, {-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0.5,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::DOUBLE); NDArray y('c', {2,3,4}, {-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2.5,3,-4,5,-2,3,-4,5,-2,3,-4,5}, nd4j::DataType::DOUBLE); NDArray k('c', {2,3}, {-2,3,-4,5.5,-2,3}, nd4j::DataType::DOUBLE); NDArray k2('c', {3,2}, {-2,3,-4,5,-2,3.5}, 
nd4j::DataType::DOUBLE); NDArray exp1('c', {3}, {5., 20., 36.}, nd4j::DataType::DOUBLE); NDArray exp2('c', {2,3}, {-8., -2., 6., 13., 22., 30.}, nd4j::DataType::DOUBLE); NDArray exp3('c', {4}, {39., 42.5, 47., 49.5}, nd4j::DataType::DOUBLE); NDArray exp4('c', {4}, {119., 122.5, 125., 129.5}, nd4j::DataType::DOUBLE); NDArray z = x.applyReduce3(nd4j::reduce3::Dot, y, {0,2}); ASSERT_TRUE(z.equalsTo(&exp1)); z = x.applyReduce3(nd4j::reduce3::Dot, k, {0,1}); ASSERT_TRUE(z.equalsTo(&exp3)); x.permutei({0,2,1}); y.permutei({0,2,1}); z = y.applyReduce3(nd4j::reduce3::Dot, x, {1}); ASSERT_TRUE(z.equalsTo(&exp2)); x2.permutei({1,0,2}); z = x2.applyReduce3(nd4j::reduce3::Dot, k2, {0,1}); ASSERT_TRUE(z.equalsTo(&exp4)); } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyReduce3_3) { NDArray x1('c', {2,2,2}, {1,2,3,4,5,6,7,8}, nd4j::DataType::INT32); NDArray x2('c', {2,2,2}, {-1,-2,-3,-4,-5,-6,-7,-8}, nd4j::DataType::INT32); NDArray x3('c', {3,2}, {1.5,1.5,1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE); NDArray x4('c', {3,2}, {1,2,3,4,5,6}, nd4j::DataType::DOUBLE); NDArray exp1('c', {}, std::vector<double>{-204}, nd4j::DataType::FLOAT32); NDArray exp2('c', {}, std::vector<double>{31.5}, nd4j::DataType::DOUBLE); auto z = x1.applyReduce3(reduce3::Dot, x2); ASSERT_TRUE(z.equalsTo(&exp1)); z = x3.applyReduce3(reduce3::Dot, x4); ASSERT_TRUE(z.equalsTo(&exp2)); x1.permutei({2,1,0}); x2.permutei({2,1,0}); x3.permutei({1,0}); x4.permutei({1,0}); z = x1.applyReduce3(reduce3::Dot, x2); ASSERT_TRUE(z.equalsTo(&exp1)); z = x3.applyReduce3(reduce3::Dot, x4); ASSERT_TRUE(z.equalsTo(&exp2)); } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyAllReduce3_1) { NDArray x1('c', {2,3,2}, {1,2,3,4,5,6,7,8,-1,-2,-3,-4,}, nd4j::DataType::INT32); NDArray x2('c', {2,2,2}, {-1,-2,-3,-4,-5,-6,-7,-8}, nd4j::DataType::INT32); NDArray x3('c', {3,2}, {1.5,1.5,1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE); NDArray x4('c', {3,2}, {1,2,3,4,5,6}, nd4j::DataType::DOUBLE); NDArray exp1('c', {3,2}, {-88.f, -124.f, 6.f, -2.f, 22.f, 14.f}, nd4j::DataType::FLOAT32); NDArray exp2('c', {6,4}, {-36.f, -44.f, -52.f, -60.f,-42.f, -52.f, -62.f, -72.f, 2.f, 0.f, -2.f, -4.f, 6.f, 4.f, 2.f, 0.f, 10.f, 8.f, 6.f, 4.f, 14.f, 12.f, 10.f, 8.f}, nd4j::DataType::FLOAT32); NDArray exp3('c', {1,1}, std::vector<double>{31.5}, nd4j::DataType::DOUBLE); NDArray exp4('c', {3,3}, {4.5, 10.5, 16.5,4.5, 10.5, 16.5,4.5, 10.5, 16.5}, nd4j::DataType::DOUBLE); auto z = x1.applyAllReduce3(reduce3::Dot, x2, {0,2}); ASSERT_TRUE(z.equalsTo(&exp1)); z = x1.applyAllReduce3(reduce3::Dot, x2, {0}); ASSERT_TRUE(z.equalsTo(&exp2)); z = x3.applyAllReduce3(reduce3::Dot, x4, {0,1}); ASSERT_TRUE(z.equalsTo(&exp3)); z = x3.applyAllReduce3(reduce3::Dot, x4, {1}); ASSERT_TRUE(z.equalsTo(&exp4)); x1.permutei({2,1,0}); x2.permutei({2,1,0}); x3.permutei({1,0}); x4.permutei({1,0}); z = x1.applyAllReduce3(reduce3::Dot, x2, {0,2}); ASSERT_TRUE(z.equalsTo(&exp1)); z = x3.applyAllReduce3(reduce3::Dot, x4, {0}); ASSERT_TRUE(z.equalsTo(&exp4)); } ////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyIndexReduce_test1) { NDArray x('c', {2,3}, {0, 10, 1, 2, 2.5,-4}, nd4j::DataType::DOUBLE); NDArray scalar('c', {}, std::vector<double>{100}, nd4j::DataType::INT64); NDArray vec1('c', {2}, {100,100}, nd4j::DataType::INT64); NDArray vec2('c', {3}, {100,100,100}, nd4j::DataType::INT64); NDArray exp1('c', {}, 
std::vector<double>{1}, nd4j::DataType::INT64); NDArray exp2('c', {2}, {1,1}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {1,0,0}, nd4j::DataType::INT64); NDArray exp4('c', {}, std::vector<double>{2}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::INT64); NDArray exp6('c', {3}, {1,0,0}, nd4j::DataType::INT64); x.applyIndexReduce(nd4j::indexreduce::IndexMax, scalar, {0,1}); ASSERT_TRUE(scalar.equalsTo(&exp1)); x.applyIndexReduce(nd4j::indexreduce::IndexMax, vec1, {1}); ASSERT_TRUE(vec1.equalsTo(&exp2)); x.applyIndexReduce(nd4j::indexreduce::IndexMax, vec2, {0}); ASSERT_TRUE(vec2.equalsTo(&exp3)); x.permutei({1,0}); x.applyIndexReduce(nd4j::indexreduce::IndexMax, scalar, {0,1}); ASSERT_TRUE(scalar.equalsTo(&exp4)); x.applyIndexReduce(nd4j::indexreduce::IndexMax, vec1, {0}); ASSERT_TRUE(vec1.equalsTo(&exp5)); x.applyIndexReduce(nd4j::indexreduce::IndexMax, vec2, {1}); ASSERT_TRUE(vec2.equalsTo(&exp6)); } ////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyIndexReduce_test2) { NDArray x('c', {2,3}, {0, 10, 1, 2, 2.5,-4}, nd4j::DataType::DOUBLE); NDArray exp1('c', {}, std::vector<double>{1}, nd4j::DataType::INT64); NDArray exp2('c', {2}, {1,1}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {1,0,0}, nd4j::DataType::INT64); NDArray exp4('c', {}, std::vector<double>{2}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::INT64); NDArray exp6('c', {3}, {1,0,0}, nd4j::DataType::INT64); auto z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0,1}); ASSERT_TRUE(z.equalsTo(&exp1)); z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {1}); ASSERT_TRUE(z.equalsTo(&exp2)); z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0}); ASSERT_TRUE(z.equalsTo(&exp3)); x.permutei({1,0}); z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0,1}); ASSERT_TRUE(z.equalsTo(&exp4)); z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0}); ASSERT_TRUE(z.equalsTo(&exp5)); z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {1}); ASSERT_TRUE(z.equalsTo(&exp6)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_float_test1) { NDArray x('c', {2,3,2}, {1,2,3,4,5,6,7,8,-1,-2,-3,-4,}, nd4j::DataType::INT32); NDArray z1('c', {}, std::vector<double>{100}, nd4j::DataType::DOUBLE); NDArray z2('c', {2,2}, {100,100,100,100}, nd4j::DataType::FLOAT32); NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::DOUBLE); NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32); NDArray z5('c', {2}, {100,100}, nd4j::DataType::FLOAT32); NDArray exp1('c', {}, std::vector<double>{2.166667}, nd4j::DataType::DOUBLE); NDArray exp2('c', {2,2}, {3.f,4.f,1.f,0.666667f}, nd4j::DataType::FLOAT32); NDArray exp3('c', {3}, {4.5,1,1}, nd4j::DataType::DOUBLE); NDArray exp4('c', {3,2}, {4,5,1,1,1,1}, nd4j::DataType::FLOAT32); NDArray exp5('c', {2}, {3.5f,0.833333f}, nd4j::DataType::FLOAT32); x.reduceAlongDimension(nd4j::reduce::Mean, z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::Mean, z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(nd4j::reduce::Mean, z3, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 x.reduceAlongDimension(nd4j::reduce::Mean, z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::Mean, z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(nd4j::reduce::Mean, z5, {0,2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } 
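
// For orientation: the test above writes reduction results into preallocated output arrays,
// while reduceAlongDimension_float_test2 below uses the overload that returns a new NDArray.
// A minimal standalone sketch of that value-returning overload (assuming the same
// NDArrayFactory / nd4j::reduce API used throughout this file; the test name is illustrative only):
//
// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_mean_sketch) {
//     auto a   = NDArrayFactory::create<double>('c', {2, 3}, {1, 2, 3, 4, 5, 6});
//     auto exp = NDArrayFactory::create<double>('c', {2}, {2, 5});        // per-row means
//     NDArray r = a.reduceAlongDimension(nd4j::reduce::Mean, {1});        // reduce over dimension 1
//     ASSERT_TRUE(exp.equalsTo(&r));
// }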
//////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_float_test2) { NDArray x('c', {2,3,2}, {1,2,3,4,5,6,7,8,-1,-2,-3,-4,}, nd4j::DataType::DOUBLE); NDArray exp1('c', {}, std::vector<double>{2.166667}, nd4j::DataType::DOUBLE); NDArray exp2('c', {2,2}, {3,4,1,0.666667}, nd4j::DataType::DOUBLE); NDArray exp3('c', {3}, {4.5,1,1}, nd4j::DataType::DOUBLE); NDArray exp4('c', {3,2}, {4,5,1,1,1,1}, nd4j::DataType::DOUBLE); NDArray exp5('c', {2}, {3.5,0.833333}, nd4j::DataType::DOUBLE); NDArray z1 = x.reduceAlongDimension(nd4j::reduce::Mean, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDimension(nd4j::reduce::Mean, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDimension(nd4j::reduce::Mean, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 NDArray z4 = x.reduceAlongDimension(nd4j::reduce::Mean, {0,1,2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDimension(nd4j::reduce::Mean, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDimension(nd4j::reduce::Mean, {0,2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, EqualityTest1) { auto arrayA = NDArrayFactory::create_<float>('f', {3, 5}); auto arrayB = NDArrayFactory::create_<float>('f', {3, 5}); auto arrayC = NDArrayFactory::create_<float>('f', {3, 5}); auto arrayD = NDArrayFactory::create_<float>('f', {2, 4}); auto arrayE = NDArrayFactory::create_<float>('f', {1, 15}); for (int i = 0; i < arrayA->rows(); i++) { for (int k = 0; k < arrayA->columns(); k++) { arrayA->p(i, k, (float) i); } } for (int i = 0; i < arrayB->rows(); i++) { for (int k = 0; k < arrayB->columns(); k++) { arrayB->p(i, k, (float) i); } } for (int i = 0; i < arrayC->rows(); i++) { for (int k = 0; k < arrayC->columns(); k++) { arrayC->p(i, k, (float) i+1); } } ASSERT_TRUE(arrayA->equalsTo(arrayB, 1e-5)); ASSERT_FALSE(arrayC->equalsTo(arrayB, 1e-5)); ASSERT_FALSE(arrayD->equalsTo(arrayB, 1e-5)); ASSERT_FALSE(arrayE->equalsTo(arrayB, 1e-5)); delete arrayA; delete arrayB; delete arrayC; delete arrayD; delete arrayE; } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_same_test1) { NDArray x('c', {2,3,2}, {1.5f,2.f,3.f,4.f,5.f,6.f,7.5f,8.f,-1.f,-2.f,-3.5f,-4.f}, nd4j::DataType::FLOAT32); NDArray z1('c', {}, std::vector<double>{100}, nd4j::DataType::FLOAT32); NDArray z2('c', {2,2}, {100,100,100,100}, nd4j::DataType::FLOAT32); NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::FLOAT32); NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32); NDArray z5('c', {2}, {100,100}, nd4j::DataType::FLOAT32); NDArray exp1('c', {}, std::vector<double>{26.5f}, nd4j::DataType::FLOAT32); NDArray exp2('c', {2,2}, {9.5f,12.f,3.f,2.f}, nd4j::DataType::FLOAT32); NDArray exp3('c', {3}, {19.f,4.f,3.5f}, nd4j::DataType::FLOAT32); NDArray exp4('c', {3,2}, {9.f,10.f,2.f,2.f,1.5f,2.f}, nd4j::DataType::FLOAT32); NDArray exp5('c', {2}, {21.5f,5.f}, nd4j::DataType::FLOAT32); x.reduceAlongDimension(nd4j::reduce::Sum, z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::Sum, z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(nd4j::reduce::Sum, z3, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 x.reduceAlongDimension(nd4j::reduce::Sum, z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); 
x.reduceAlongDimension(nd4j::reduce::Sum, z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(nd4j::reduce::Sum, z5, {0,2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_same_test2) { NDArray x('c', {2,3,2}, {1.5,2,3,4,5,6,7.5,8,-1,-2,-3.5,-4,}, nd4j::DataType::INT64); NDArray exp1('c', {}, std::vector<double>{26}, nd4j::DataType::INT64); NDArray exp2('c', {2,2}, {9,12,3,2}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {18,4,4}, nd4j::DataType::INT64); NDArray exp4('c', {3,2}, {8,10,2,2,2,2}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {21,5}, nd4j::DataType::INT64); NDArray z1 = x.reduceAlongDimension(nd4j::reduce::Sum, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDimension(nd4j::reduce::Sum, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDimension(nd4j::reduce::Sum, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 NDArray z4 = x.reduceAlongDimension(nd4j::reduce::Sum, {0,1,2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDimension(nd4j::reduce::Sum, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDimension(nd4j::reduce::Sum, {0,2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_bool_test1) { NDArray x('c', {2,3,2}, {0.5,2,3,-4,5,6,-7.5,8,-1,-0.5,-3.5,4}, nd4j::DataType::DOUBLE); NDArray z1('c', {}, std::vector<double>{true}, nd4j::DataType::BOOL); NDArray z2('c', {2,2}, {true,true,true,true}, nd4j::DataType::BOOL); NDArray z3('c', {3}, {true,true,true}, nd4j::DataType::BOOL); NDArray z4('c', {3,2}, {true,true,true,true,true,true}, nd4j::DataType::BOOL); NDArray z5('c', {2}, {true,true}, nd4j::DataType::BOOL); NDArray exp1('c', {}, std::vector<double>{true}, nd4j::DataType::BOOL); NDArray exp2('c', {2,2}, {true,true,false,true}, nd4j::DataType::BOOL); NDArray exp3('c', {3}, {true,true,true}, nd4j::DataType::BOOL); NDArray exp4('c', {3,2}, {true,true,true,false,true,true}, nd4j::DataType::BOOL); NDArray exp5('c', {2}, {true,true}, nd4j::DataType::BOOL); x.reduceAlongDimension(nd4j::reduce::IsPositive, z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::IsPositive, z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(nd4j::reduce::IsPositive, z3, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 x.reduceAlongDimension(nd4j::reduce::IsPositive, z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::IsPositive, z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(nd4j::reduce::IsPositive, z5, {0,2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_bool_test2) { NDArray x('c', {2,3,2}, {0.5,2,3,-4,5,6,-7.5,8,-1,-0.5,-3.5,4}, nd4j::DataType::INT32); NDArray exp1('c', {}, std::vector<double>{1}, nd4j::DataType::BOOL); NDArray exp2('c', {2,2}, {1,1,0,1}, nd4j::DataType::BOOL); NDArray exp3('c', {3}, {1,1,1}, nd4j::DataType::BOOL); NDArray exp4('c', {3,2}, {0,1,1,0,1,1}, nd4j::DataType::BOOL); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::BOOL); NDArray z1 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {1}); 
ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 NDArray z4 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {0,1,2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {0,2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_long_test1) { NDArray x('c', {2,3,2}, {0.5f,2.f,3.f,-0.f,5.f,6.f,-7.5f,0.f,-1.f,-0.5f,-3.5f,4.f}, nd4j::DataType::FLOAT32); NDArray z1('c', {}, std::vector<double>{100}, nd4j::DataType::INT64); NDArray z2('c', {2,2}, {100,100,100,100}, nd4j::DataType::INT64); NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::INT64); NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::INT64); NDArray z5('c', {2}, {100,100}, nd4j::DataType::INT64); NDArray exp1('c', {}, std::vector<double>{2}, nd4j::DataType::INT64); NDArray exp2('c', {2,2}, {0,1,0,1}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {1,1,0}, nd4j::DataType::INT64); NDArray exp4('c', {3,2}, {0,1,0,1,0,0}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::INT64); x.reduceAlongDimension(nd4j::reduce::CountZero, z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::CountZero, z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(nd4j::reduce::CountZero, z3, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 x.reduceAlongDimension(nd4j::reduce::CountZero, z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::CountZero, z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(nd4j::reduce::CountZero, z5, {0,2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_long_test2) { NDArray x('c', {2,3,2}, {0.5,2,3,-0,5,6,-7.5,0,-1,-0.5,-3.5,4}, nd4j::DataType::INT32); NDArray exp1('c', {}, std::vector<double>{4}, nd4j::DataType::INT64); NDArray exp2('c', {2,2}, {1,1,0,2}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {2,2,0}, nd4j::DataType::INT64); NDArray exp4('c', {3,2}, {1,1,0,2,0,0}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {2,2}, nd4j::DataType::INT64); NDArray z1 = x.reduceAlongDimension(nd4j::reduce::CountZero, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDimension(nd4j::reduce::CountZero, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDimension(nd4j::reduce::CountZero, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 NDArray z4 = x.reduceAlongDimension(nd4j::reduce::CountZero, {0,1,2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDimension(nd4j::reduce::CountZero, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDimension(nd4j::reduce::CountZero, {0,2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } TEST_F(NDArrayCudaBasicsTests, BroadcastOpsTest1) { auto x = NDArrayFactory::create<float>('c', {5, 5}); auto z = NDArrayFactory::create<float>('c', {5, 5}); auto row = NDArrayFactory::linspace(1.0f, 5.0f, 5); NDArray expRow('c', {1, 5,}, {1,2,3,4,5}, nd4j::DataType::FLOAT32); NDArray exp('c', {5,5}, {1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}, nd4j::DataType::FLOAT32); 
ASSERT_TRUE(row->equalsTo(&expRow)); x.applyBroadcast(broadcast::Add, {1}, *row, z); x += *row; ASSERT_TRUE(x.equalsTo(z)); //ASSERT_TRUE(z.equalsTo(&exp)); delete row; } TEST_F(NDArrayCudaBasicsTests, BroadcastOpsTest2) { auto x = NDArrayFactory::create<float>('c', {5, 5}); //auto z = NDArrayFactory::create<float>('c', {5, 5}); auto row = NDArrayFactory::linspace(1.0f, 5.0f, 5); NDArray expRow('c', {1, 5,}, {1,2,3,4,5}, nd4j::DataType::FLOAT32); NDArray exp('c', {5,5}, {1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}, nd4j::DataType::FLOAT32); ASSERT_TRUE(row->equalsTo(&expRow)); x.applyBroadcast(broadcast::Add, {1}, *row, x); ASSERT_TRUE(x.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestBroadcast_1) { NDArray exp('c', {2, 3, 2, 2}, {1., 1., 1., 1., 2., 2., 2., 2., 3., 3., 3., 3., 1., 1., 1., 1., 2., 2., 2., 2., 3., 3., 3., 3.}, nd4j::DataType::DOUBLE); auto input = NDArrayFactory::create<double>('c',{ 2, 3, 2, 2}); auto bias = NDArrayFactory::create<double>('c', {1, 3}); bias.linspace(1); input.applyBroadcast(broadcast::Add, {1}, bias, input); ASSERT_TRUE(exp.equalsTo(&input)); } TEST_F(NDArrayCudaBasicsTests, TestFloat16_1) { auto x = NDArrayFactory::create<float>({1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<float>({1,2,3,4,5,7,8,9}); ASSERT_TRUE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat16_2) { auto x = NDArrayFactory::create<float16>('c', {9}, {1,2,3,4,5,6,7,8,9}); auto y = NDArrayFactory::create<float16>('c', {9}, {1,2,3,4,5,6,7,8,9}); ASSERT_TRUE(x.equalsTo(y)); //for (int e = 0; e < x.lengthOf(); e++) // ASSERT_NEAR(x.e<float16>(e), y.e<float16>(e), 1.e-5f); } TEST_F(NDArrayCudaBasicsTests, TestFloat16_3) { auto x = NDArrayFactory::create<bfloat16>({1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<bfloat16>({1,2,3,4,5,7,8,9}); ASSERT_TRUE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat_4) { auto x = NDArrayFactory::create<float>({1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<float>({2,4,5,5,6,7,8,9}); ASSERT_FALSE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat_5) { auto x = NDArrayFactory::create<float>('c', {3,3}, {1,2,3,4,5,6,7,8,9}); auto y = NDArrayFactory::create<float>('c', {3,3}, {2,4,5,5,6,7,8,9, 10}); ASSERT_FALSE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat_6) { auto x = NDArrayFactory::create<float>('f', {3,3}, {1,2,3,4,5,6,7,8,9}); auto y = NDArrayFactory::create<float>('f', {3,3}, {2,4,5,5,6,7,8,9,10}); ASSERT_FALSE(x.equalsTo(&y)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_05) { auto x = NDArrayFactory::create<float>('c', {8, 8, 8}); auto y = NDArrayFactory::create<float>('c', {1, 8, 8}); auto expected = NDArrayFactory::create<float>('c', {8, 8, 8}); NDArray res2 = NDArrayFactory::create<float>(expected.ordering(), expected.getShapeAsVector()); x = 1.; y = 2.; expected = 3.; res2 = 0.f; x.applyTrueBroadcast(BroadcastOpsTuple::Add(), y, res2);// *= y; ASSERT_TRUE(expected.isSameShape(&res2)); ASSERT_TRUE(expected.equalsTo(&res2)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_5) { auto x = NDArrayFactory::create<float>('c', {8, 8, 8}); auto y = NDArrayFactory::create<float>('c', {8, 1, 8}); auto expected = NDArrayFactory::create<float>('c', {8, 8, 8}); NDArray res2(expected); x = 1.; y = 2.; expected = 3.; //x.printBuffer("X="); 
//y.printBuffer("Y="); //expected.printBuffer("EXPECTED"); auto result = x + y; //result.printBuffer("1 + 2 ="); //res2.assign(x + y); //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2); //res2.printBuffer("Z="); //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2);// *= y; // x += y; //x.printBuffer("OutputX"); //res2.syncToHost(); //res2.printBuffer("OUputZ"); //x.printIndexedBuffer("OUtputX"); ASSERT_TRUE(expected.isSameShape(&result)); ASSERT_TRUE(expected.equalsTo(&result)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_51) { auto x = NDArrayFactory::create<float>('c', {8, 8, 8}); auto y = NDArrayFactory::create<float>('c', {8, 8}); auto expected = NDArrayFactory::create<float>('c', {8, 8, 8}); NDArray res2(expected); x = 1.; y = 2.; expected = 3.; //x.printBuffer("X="); //y.printBuffer("Y="); //expected.printBuffer("EXPECTED"); auto result = x + y; //result.printBuffer("1 + 2 ="); //res2.assign(x + y); //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2); //res2.printBuffer("Z="); //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2);// *= y; // x += y; //x.printBuffer("OutputX"); //res2.syncToHost(); //res2.printBuffer("OUputZ"); //x.printIndexedBuffer("OUtputX"); ASSERT_TRUE(expected.isSameShape(&result)); ASSERT_TRUE(expected.equalsTo(&result)); } TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_1) { auto x = NDArrayFactory::create<float>('c', {2, 1, 2}); x = 10.; auto y = x.tile({1,2,1}); auto exp = NDArrayFactory::create<float>('c', {2, 2, 2}); exp = 10.; // y.printShapeInfo("Output SHAPE"); // y.printBuffer("Output TILE"); // exp.printBuffer("Expect TILE"); ASSERT_TRUE(exp.equalsTo(y)); } TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_2) { auto x = NDArrayFactory::create<float>('f', {2, 1, 2}); x = 10.; auto y = x.tile({1,2,1}); auto exp = NDArrayFactory::create<float>('f', {2, 2, 2}); exp = 10.; ASSERT_TRUE(exp.equalsTo(y)); } TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_3) { auto x = NDArrayFactory::create<float>('f', {2, 1, 2}); x = 10.; x.p(1,0,1, 20); x.syncToDevice(); auto y = x.tile({1,2,1}); auto exp = NDArrayFactory::create<float>('f', {2, 2, 2}); exp = 10.; exp.p(1,0,1, 20.); exp.p(1, 1, 1, 20.); exp.syncToDevice(); ASSERT_TRUE(exp.equalsTo(y)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_2) { double expBuff[] = {2., 3, 3., 4., 4., 5, 5., 6., 6., 7, 7., 8.}; NDArray a('c', {4,4}, {1,2,3,4,5,6,7,8,9,2,3,2,1,0,4,7}, nd4j::DataType::FLOAT32); auto x = NDArrayFactory::create<double>('c', {3, 2, 1}); auto y = NDArrayFactory::create<double>('c', {1, 2}); auto expected = NDArrayFactory::create<double>(expBuff, 'c', {3, 2, 2}); x.linspace(1); y.linspace(1); auto result = x + y; ASSERT_TRUE(expected.isSameShape(&result)); ASSERT_TRUE(expected.equalsTo(&result)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, assign_2) { NDArray x('c', {4}, {1.5f,2.5f,3.5f,4.5f}, nd4j::DataType::FLOAT32); NDArray y('c', {4}, nd4j::DataType::INT32); NDArray expected('c', {4}, {1,2,3,4}, nd4j::DataType::INT32); y.assign(x); // y.printBuffer("ASSIGN VECTOR"); ASSERT_TRUE(expected.equalsTo(&y)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, subarray_1) { NDArray x('c', {2,3,4}, {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24}, nd4j::DataType::FLOAT32); NDArray y('f', {2,3,4}, 
{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24}, nd4j::DataType::FLOAT32); Nd4jLong shapeExpX0[] = {1, 2, 12, 8192, 1, 99}; float buffExpX0[] = {1.f, 13.f}; Nd4jLong shapeExpX1[] = {1, 2, 12, 8192, 1, 99}; float buffExpX1[] = {2.f, 14.f}; Nd4jLong shapeExpX2[] = {3, 2, 1, 1, 12, 4, 1, 8192, 1, 99}; float buffExpX2[] = {1.f, 13.f}; Nd4jLong shapeExpX3[] = {2, 2, 4, 12, 1, 8192, 1, 99}; float buffExpX3[] = {9.f, 10.f, 11.f, 12.f, 21.f, 22.f, 23.f, 24.f}; Nd4jLong shapeExpX4[] = {3, 2, 1, 4, 12, 4, 1, 8192, 1, 99}; float buffExpX4[] = {9.f, 10.f, 11.f, 12.f, 21.f, 22.f, 23.f, 24.f}; Nd4jLong shapeExpX5[] = {2, 2, 3, 12, 4, 8192, 1, 99}; float buffExpX5[] = {4.f, 8.f, 12.f, 16.f, 20.f, 24.f}; Nd4jLong shapeExpY0[] = {1, 2, 1, 8192, 1, 99}; float buffExpY0[] = {1.f, 2.f}; Nd4jLong shapeExpY1[] = {1, 2, 1, 8192, 1, 99}; float buffExpY1[] = {7.f, 8.f}; Nd4jLong shapeExpY2[] = {3, 2, 1, 1, 1, 2, 6, 8192, 1, 102}; float buffExpY2[] = {1.f, 2.f}; Nd4jLong shapeExpY3[] = {2, 2, 4, 1, 6, 8192, 1, 99}; float buffExpY3[] = {5.f, 11.f, 17.f, 23.f, 6.f, 12.f, 18.f, 24.f}; Nd4jLong shapeExpY4[] = {3, 2, 1, 4, 1, 2, 6, 8192, 1, 102}; float buffExpY4[] = {5.f, 11.f, 17.f, 23.f, 6.f, 12.f, 18.f, 24.f}; Nd4jLong shapeExpY5[] = {2, 2, 3, 1, 2, 8192, 1, 99}; float buffExpY5[] = {19.f, 21.f, 23.f, 20.f, 22.f, 24.f}; NDArray x0 = x(0, {1,2}); NDArray xExp(buffExpX0, shapeExpX0); ASSERT_TRUE(xExp.isSameShape(x0)); ASSERT_TRUE(xExp.equalsTo(x0)); // for(int i = 0; i < shape::shapeInfoLength(x0.rankOf()); ++i) // ASSERT_TRUE(x0.getShapeInfo()[i] == shapeExpX0[i]); // for(int i = 0; i < x0.lengthOf(); ++i) // ASSERT_TRUE(x0.e<float>(i) == buffExpX0[i]); NDArray x1 = x(1, {1,2}); NDArray x1Exp(buffExpX1, shapeExpX1); ASSERT_TRUE(x1Exp.isSameShape(x1)); ASSERT_TRUE(x1Exp.equalsTo(x1)); // for(int i = 0; i < shape::shapeInfoLength(x1.rankOf()); ++i) // ASSERT_TRUE(x1.getShapeInfo()[i] == shapeExpX1[i]); // for(int i = 0; i < x1.lengthOf(); ++i) // ASSERT_TRUE(x1.e<float>(i) == buffExpX1[i]); NDArray x2 = x(0, {1,2}, true); NDArray x2Exp(buffExpX2, shapeExpX2); ASSERT_TRUE(x2Exp.isSameShape(x2)); // x2.printBuffer("X2"); // x2Exp.printBuffer("X2 EXPECT"); ASSERT_TRUE(x2Exp.equalsTo(x2)); // for(int i = 0; i < shape::shapeInfoLength(x2.rankOf()); ++i) // ASSERT_TRUE(x2.getShapeInfo()[i] == shapeExpX2[i]); // for(int i = 0; i < x2.lengthOf(); ++i) // ASSERT_TRUE(x2.e<float>(i) == buffExpX2[i]); NDArray x3 = x(2, {1}); NDArray x3Exp(buffExpX3, shapeExpX3); ASSERT_TRUE(x3Exp.isSameShape(x3)); ASSERT_TRUE(x3Exp.equalsTo(x3)); // for(int i = 0; i < shape::shapeInfoLength(x3.rankOf()); ++i) // ASSERT_TRUE(x3.getShapeInfo()[i] == shapeExpX3[i]); // for(int i = 0; i < x3.lengthOf(); ++i) // ASSERT_TRUE(x3.e<float>(i) == buffExpX3[i]); NDArray x4 = x(2, {1}, true); NDArray x4Exp(buffExpX4, shapeExpX4); ASSERT_TRUE(x4Exp.isSameShape(x4)); ASSERT_TRUE(x4Exp.equalsTo(x4)); // for(int i = 0; i < shape::shapeInfoLength(x4.rankOf()); ++i) // ASSERT_TRUE(x4.getShapeInfo()[i] == shapeExpX4[i]); // for(int i = 0; i < x4.lengthOf(); ++i) // ASSERT_TRUE(x4.e<float>(i) == buffExpX4[i]); NDArray x5 = x(3, {2}); NDArray x5Exp(buffExpX5, shapeExpX5); ASSERT_TRUE(x5Exp.isSameShape(x5)); ASSERT_TRUE(x5Exp.equalsTo(x5)); // for(int i = 0; i < shape::shapeInfoLength(x5.rankOf()); ++i) // ASSERT_TRUE(x5.getShapeInfo()[i] == shapeExpX5[i]); // for(int i = 0; i < x5.lengthOf(); ++i) // ASSERT_TRUE(x5.e<float>(i) == buffExpX5[i]); // ******************* // NDArray y0 = y(0, {1,2}); NDArray y0Exp(buffExpY0, shapeExpY0); 
ASSERT_TRUE(y0Exp.isSameShape(y0)); ASSERT_TRUE(y0Exp.equalsTo(y0)); // for(int i = 0; i < shape::shapeInfoLength(y0.rankOf()); ++i) // ASSERT_TRUE(y0.getShapeInfo()[i] == shapeExpY0[i]); // for(int i = 0; i < y0.lengthOf(); ++i) // ASSERT_TRUE(y0.e<float>(i) == buffExpY0[i]); NDArray y1 = y(1, {1,2}); NDArray y1Exp(buffExpY1, shapeExpY1); ASSERT_TRUE(y1Exp.isSameShape(y1)); ASSERT_TRUE(y1Exp.equalsTo(y1)); // for(int i = 0; i < shape::shapeInfoLength(y1.rankOf()); ++i) // ASSERT_TRUE(y1.getShapeInfo()[i] == shapeExpY1[i]); // for(int i = 0; i < y1.lengthOf(); ++i) // ASSERT_TRUE(y1.e<float>(i) == buffExpY1[i]); NDArray y2 = y(0, {1,2}, true); NDArray y2Exp(buffExpY2, shapeExpY2); ASSERT_TRUE(y2Exp.isSameShape(y2)); ASSERT_TRUE(y2Exp.equalsTo(y2)); // for(int i = 0; i < shape::shapeInfoLength(y2.rankOf()); ++i) // ASSERT_TRUE(y2.getShapeInfo()[i] == shapeExpY2[i]); // for(int i = 0; i < y2.lengthOf(); ++i) // ASSERT_TRUE(y2.e<float>(i) == buffExpY2[i]); NDArray y3 = y(2, {1}); NDArray y3Exp(buffExpY3, shapeExpY3); ASSERT_TRUE(y3Exp.isSameShape(y3)); ASSERT_TRUE(y3Exp.equalsTo(y3)); // for(int i = 0; i < shape::shapeInfoLength(y3.rankOf()); ++i) // ASSERT_TRUE(y3.getShapeInfo()[i] == shapeExpY3[i]); // for(int i = 0; i < y3.lengthOf(); ++i) // ASSERT_TRUE(y3.e<float>(i) == buffExpY3[i]); NDArray y4 = y(2, {1}, true); NDArray y4Exp = NDArrayFactory::create<float>('f', {2,1,4}, {5, 6, 11, 12, 17, 18, 23, 24}); ASSERT_TRUE(y4Exp.isSameShape(y4)); ASSERT_TRUE(y4Exp.equalsTo(y4)); // for(int i = 0; i < shape::shapeInfoLength(y4.rankOf()); ++i) // ASSERT_TRUE(y4.getShapeInfo()[i] == shapeExpY4[i]); // for(int i = 0; i < y4.lengthOf(); ++i) // ASSERT_TRUE(y4.e<float>(i) == buffExpY4[i]); NDArray y5 = y(3, {2}); NDArray y5Exp(buffExpY5, shapeExpY5); ASSERT_TRUE(y5Exp.isSameShape(y5)); ASSERT_TRUE(y5Exp.equalsTo(y5)); // for(int i = 0; i < shape::shapeInfoLength(y5.rankOf()); ++i) // ASSERT_TRUE(y5.getShapeInfo()[i] == shapeExpY5[i]); // for(int i = 0; i < y5.lengthOf(); ++i) // ASSERT_TRUE(y5.e<float>(i) == buffExpY5[i]); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Test_diagonal_1) { auto x = NDArrayFactory::create<float>('c', {2, 3}, {1, 2, 3, 4, 5, 6}); auto exp = NDArrayFactory::create<float>('c', {2, 1}, {1, 5}); auto diag = x.diagonal('c'); //diag.syncToDevice(); for (Nd4jLong e = 0; e < exp.lengthOf(); ++e) { printf("VAL[%ld] = %f\n", e, diag.e<float>(e)); //, exp.e<float>(e), 1.e-5); } for (Nd4jLong e = 0; e < exp.lengthOf(); ++e) { ASSERT_NEAR(diag.e<float>(e), exp.e<float>(e), 1.e-5); } double eps(1.e-5); NDArray tmp(nd4j::DataType::FLOAT32, x.getContext()); // scalar = 0 ExtraArguments extras({eps}); NativeOpExecutioner::execReduce3Scalar(diag.getContext(), reduce3::EqualsWithEps, diag.getBuffer(), diag.getShapeInfo(), diag.getSpecialBuffer(), diag.getSpecialShapeInfo(), extras.argumentsAsT(nd4j::DataType::FLOAT32), exp.getBuffer(), exp.getShapeInfo(), exp.getSpecialBuffer(), exp.getSpecialShapeInfo(), tmp.buffer(), tmp.shapeInfo(), tmp.specialBuffer(), tmp.specialShapeInfo()); hipStream_t* stream = x.getContext()->getCudaStream(); auto res = hipStreamSynchronize(*stream); // tmp.printBuffer("Compare result is (expected 0)"); ASSERT_TRUE(exp.isSameShape(diag)); ASSERT_TRUE(exp.equalsTo(diag)); } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_02) { auto x = NDArrayFactory::linspace<float>(1.f, 60.f, 60); //('c', {1, 60}); //x.linspace(1); auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 
6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); x->reshapei('c', {3, 4, 5}); x->permutei({0, 1, 2}); x->streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(x)); ASSERT_TRUE(exp.equalsTo(x)); delete x; } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_0) { auto x = NDArrayFactory::create<float>('c', {1, 60}); x.linspace(1); auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); x.reshapei('c', {3, 4, 5}); x.permutei({0, 1, 2}); x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(&x)); ASSERT_TRUE(exp.equalsTo(&x)); } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_1) { auto x = NDArrayFactory::create<float>('c', {1, 60}); x.linspace(1); auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); x.reshapei('c', {3, 4, 5}); x.permutei({0, 1, 2}); x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(&x)); ASSERT_TRUE(exp.equalsTo(&x)); } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_2) { //auto x = NDArrayFactory::create<float>('c', {1, 60}); auto xx = NDArrayFactory::linspace<float>(1.f, 60.f, 60); //('c', {1, 60}); // auto x = *xx; //x.linspace(1); // auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); // x.reshapei('c', {3, 4, 5}); // x.permutei({0, 1, 2}); // x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); // ASSERT_TRUE(exp.isSameShape(&x)); // ASSERT_TRUE(exp.equalsTo(&x)); delete xx; } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_3) { auto x = NDArrayFactory::create<float>('c', {1, 60}); //x.linspace(1); for (int l = 0; l < x.lengthOf(); l++) x.p(l, float(l + 1.f)); auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 
25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f,
        37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f,
        49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0});

    x.reshapei('c', {3, 4, 5});
    x.permutei({0, 1, 2});
    x.streamline();
//  x.printShapeInfo("{0, 1, 2} shape");
//  x.printBuffer("{0, 1, 2} data");

    ASSERT_TRUE(exp.isSameShape(&x));
    ASSERT_TRUE(exp.equalsTo(&x));
}

TEST_F(NDArrayCudaBasicsTests, Test_Empty_1) {
    auto x = NDArrayFactory::empty<float>();
    ASSERT_TRUE(x.isActualOnHostSide());
    ASSERT_TRUE(x.isEmpty());
}

TEST_F(NDArrayCudaBasicsTests, Test_Empty_2) {
    auto x = NDArrayFactory::empty_<float>();
    ASSERT_TRUE(x->isEmpty());

    delete x;
}

TEST_F(NDArrayCudaBasicsTests, Test_Empty_3) {
    auto x = NDArrayFactory::empty(nd4j::DataType::FLOAT32);
    ASSERT_TRUE(x.isEmpty());
}

TEST_F(NDArrayCudaBasicsTests, Test_Empty_4) {
    auto x = NDArrayFactory::empty_(nd4j::DataType::FLOAT32);
    ASSERT_TRUE(x->isEmpty());

    delete x;
}
b7b1345a52cd53c976b824b585ebd70e40d7baa7.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include "testlayers.h" #include <NDArray.h> #include <NDArrayFactory.h> #include <Context.h> #include <Node.h> #include <graph/Variable.h> #include <graph/VariableSpace.h> #include <execution/LaunchContext.h> #include <specials_cuda.h> #include <TAD.h> #include <ops/declarable/CustomOperations.h> #include <cuda.h> using namespace nd4j; using namespace nd4j::graph; class NDArrayCudaBasicsTests : public testing::Test { public: }; ////////////////////////////////////////////////////////////////////////// static cudaError_t allocateDeviceMem(LaunchContext& lc, std::vector<void*>& devicePtrs, const std::vector<std::pair<void*,size_t>>& hostData) { if(devicePtrs.size() != hostData.size()) throw std::invalid_argument("prepareDataForCuda: two input sts::vectors should same sizes !"); cudaError_t cudaResult; void* reductionPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); if(cudaResult != 0) return cudaResult; int* allocationPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); if(cudaResult != 0) return cudaResult; lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); cudaStream_t stream = *lc.getCudaStream(); for(int i = 0; i < devicePtrs.size(); ++i) { cudaResult = cudaMalloc(reinterpret_cast<void **>(&devicePtrs[i]), hostData[i].second); if(cudaResult != 0) return cudaResult; cudaMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, cudaMemcpyHostToDevice, stream); } return cudaResult; } TEST_F(NDArrayCudaBasicsTests, Test_Registration_1) { auto x = NDArrayFactory::create<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); } TEST_F(NDArrayCudaBasicsTests, Test_Registration_2) { auto x = NDArrayFactory::create<int>('c', {5}); auto y = NDArrayFactory::create<int>('c', {5}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); } TEST_F(NDArrayCudaBasicsTests, Test_Registration_3) { auto x = NDArrayFactory::create<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); NDArray::registerSpecialUse({&x}, {&y}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); ASSERT_TRUE(y.isActualOnDeviceSide()); ASSERT_FALSE(y.isActualOnHostSide()); } TEST_F(NDArrayCudaBasicsTests, Test_Registration_01) { auto x = NDArrayFactory::create_<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create_<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_FALSE(x->isActualOnHostSide()); delete x; delete y; } 
TEST_F(NDArrayCudaBasicsTests, Test_Registration_02) { auto x = NDArrayFactory::create_<int>('c', {5}); auto y = NDArrayFactory::create_<int>('c', {5}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_FALSE(x->isActualOnHostSide()); delete x; delete y; } TEST_F(NDArrayCudaBasicsTests, Test_Registration_03) { auto x = NDArrayFactory::create_<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create_<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_FALSE(x->isActualOnHostSide()); NDArray::registerSpecialUse({y}, {x}); x->applyTransform(transform::Neg, *y); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //y->syncToHost(); // y->printBuffer("Negatives"); delete x; delete y; } TEST_F(NDArrayCudaBasicsTests, Test_Cosine_1) { auto x = NDArrayFactory::create_<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create_<double>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_FALSE(x->isActualOnHostSide()); NDArray::registerSpecialUse({y}, {x}); x->applyTransform(transform::Cosine, *y); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //y->syncToHost(); delete x; delete y; } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_1) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', { 5 }, {10, 10, 10, 10, 10}); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(cudaStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(cudaStream_t)); cudaError_t dZ = cudaStreamCreate(reinterpret_cast<cudaStream_t *>(&nativeStream)); auto stream = reinterpret_cast<cudaStream_t *>(&nativeStream); //cudaMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), cudaMemcpyHostToDevice, *stream); //cudaMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), cudaMemcpyHostToDevice, *stream); LaunchContext lc(stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); z.tickWriteDevice(); auto res = cudaStreamSynchronize(*stream); ASSERT_EQ(0, res); for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_2) { // allocating host-side arrays NDArray x('c', { 5 }, { 1, 2, 3, 4, 5}); NDArray y('c', { 5 }, { 1, 2, 3, 4, 5}); 
NDArray z('c', { 5 }, nd4j::DataType::DOUBLE); NDArray exp('c', { 5 }, { 2, 4, 6, 8, 10 }); Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(cudaStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(cudaStream_t)); cudaError_t dZ = cudaStreamCreate(reinterpret_cast<cudaStream_t *>(&nativeStream)); auto stream = reinterpret_cast<cudaStream_t *>(&nativeStream); LaunchContext lc(stream, *stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); auto res = cudaStreamSynchronize(*stream); ASSERT_EQ(0, res); for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_3) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', { 5 }, {10, 10, 10, 10, 10}); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(cudaStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(cudaStream_t)); cudaError_t dZ = cudaStreamCreate(reinterpret_cast<cudaStream_t *>(&nativeStream)); auto stream = reinterpret_cast<cudaStream_t *>(&nativeStream); //cudaMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), cudaMemcpyHostToDevice, *stream); //cudaMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), cudaMemcpyHostToDevice, *stream); LaunchContext lc(stream, *stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); z.tickWriteDevice(); auto res = cudaStreamSynchronize(*stream); ASSERT_EQ(0, res); //double* localBuffer = ; z.syncToHost(); cudaMemcpy(z.buffer(), z.specialBuffer(), z.lengthOf() * z.sizeOfT(), cudaMemcpyDeviceToHost); res = cudaStreamSynchronize(*stream); z.tickWriteHost(); ASSERT_EQ(0, res); // // cudaFree(devBufferPtrX); //cudaFree(devBufferPtrZ); //cudaFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_4) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making 
raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x.applyPairwiseTransform(pairwise::Add, y, z); // // cudaFree(devBufferPtrX); //cudaFree(devBufferPtrZ); //cudaFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_5) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x += y; //x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); x.syncToHost(); //y.printBuffer("3Y = "); //z.printBuffer("3Result out"); // // cudaFree(devBufferPtrX); //cudaFree(devBufferPtrZ); //cudaFree(devShapePtrX); for (int e = 0; e < x.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_6) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>(2); //.'c', { 5 }, { 1, 2, 3, 4, 5}); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 3, 4, 5, 6, 7 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x += y; //x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); x.syncToHost(); // cudaFree(devBufferPtrX); //cudaFree(devBufferPtrZ); //cudaFree(devShapePtrX); for (int e = 0; e < x.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_7) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); //auto y = NDArrayFactory::create<double>(2); //.'c', { 5 }, { 1, 2, 3, 4, 5}); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 3, 4, 5, 6, 7 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //cudaError_t res = cudaMalloc(reinterpret_cast<void 
//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestAdd_7) {
    // allocating host-side arrays
    auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5});
    auto exp = NDArrayFactory::create<double>('c', { 5 }, { 3, 4, 5, 6, 7 });

    x += 2.;
    x.syncToHost();

    for (int e = 0; e < x.lengthOf(); e++) {
        ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5);
    }
}

//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestMultiply_1) {
    // allocating host-side arrays
    auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5});
    auto z = NDArrayFactory::create<double>('c', { 5 });
    auto exp = NDArrayFactory::create<double>('c', { 5 }, { 1, 4, 9, 16, 25 });

    x.applyPairwiseTransform(pairwise::Multiply, y, z);

    for (int e = 0; e < z.lengthOf(); e++) {
        ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
    }
}

//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestMultiply_2) {
    // allocating host-side arrays
    auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5});
    NDArray z('c', { 5 }, nd4j::DataType::DOUBLE);
    auto exp = NDArrayFactory::create<double>('c', { 5 }, { 1, 4, 9, 16, 25 });

    x.applyPairwiseTransform(pairwise::Multiply, y, z);

    for (int e = 0; e < z.lengthOf(); e++) {
        ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
    }
}

//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestMultiply_3) {
    // allocating host-side arrays
    NDArray x('c', { 5 }, { 1, 2, 3, 4, 5}, nd4j::DataType::DOUBLE);
    NDArray y('c', { 5 }, { 1., 2., 3., 4., 5.}, nd4j::DataType::DOUBLE);
    auto z = NDArrayFactory::create<double>('c', { 5 });
    auto exp = NDArrayFactory::create<double>('c', { 5 }, { 1, 4, 9, 16, 25 });

    x.applyPairwiseTransform(pairwise::Multiply, y, z);

    for (int e = 0; e < z.lengthOf(); e++) {
        ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
    }
}

//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestMultiply_4) {
    // allocating host-side arrays
    NDArray x('c', { 5 }, { 1, 2, 3, 4, 5}, nd4j::DataType::DOUBLE);
    NDArray y('c', { 5 }, { 1., 2., 3., 4., 5.}, nd4j::DataType::DOUBLE);
    auto exp = NDArrayFactory::create<double>('c', { 5 }, { 1, 4, 9, 16, 25 });

    x *= y;

    for (int e = 0; e < x.lengthOf(); e++) {
        ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5);
    }
}

//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestPrimitiveNeg_01) {
    // allocating host-side arrays
    auto x = NDArrayFactory::create<int>('c', { 5 }, { 1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<int>('c', { 5 }, { 1, 2, 3, 4, 5});
    auto exp = NDArrayFactory::create<int>('c', { 5 }, { -1, -2, -3, -4, -5 });

    auto stream = x.getContext()->getCudaStream();

    NativeOpExecutioner::execTransformSame(x.getContext(), transform::Neg, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, nullptr, nullptr);
    auto res = cudaStreamSynchronize(*stream);
    ASSERT_EQ(0, res);
    y.tickWriteDevice();

    for (int e = 0; e < y.lengthOf(); e++) {
        ASSERT_NEAR(exp.e<int>(e), y.e<int>(e), 1e-5);
    }
}

TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveNeg_2) {
    auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<double>('c', {5});

    ASSERT_TRUE(x.isActualOnDeviceSide());
    ASSERT_FALSE(x.isActualOnHostSide());

    x.applyTransform(transform::Neg, y);
}
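// The isActualOnDeviceSide()/isActualOnHostSide() assertions in the transform tests
// below capture the buffer-state contract these tests rely on: right after factory
// creation the data is considered current on the device side only, and the host
// mirror only becomes valid again after an explicit syncToHost() (or an accessor
// that performs that sync internally).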
TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveSqrt_1) { // strict
    auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<double>('c', {5});
    auto exp = NDArrayFactory::create<double>({1.000000, 1.414214, 1.732051, 2.000000, 2.236068});

    ASSERT_TRUE(x.isActualOnDeviceSide());
    ASSERT_FALSE(x.isActualOnHostSide());

    x.applyTransform(transform::Sqrt, y);
    ASSERT_TRUE(y.equalsTo(exp));
}

TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveAssign_1) { // strict
    auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<double>('c', {5});

    x.applyTransform(transform::Assign, y);
    ASSERT_TRUE(y.equalsTo(x));
}

TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_1) { // strict
    auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<double>('c', {5});
    auto exp = NDArrayFactory::create<double>('c', {5}, {0.540302, -0.416147, -0.989992, -0.653644, 0.283662});

    ASSERT_TRUE(x.isActualOnDeviceSide());
    ASSERT_FALSE(x.isActualOnHostSide());

    x.applyTransform(transform::Cosine, y);
    ASSERT_TRUE(exp.isSameShape(y));
    ASSERT_TRUE(exp.dataType() == y.dataType());
}

TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_2) {
    auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<double>('c', {5});
    auto exp = NDArrayFactory::create<double>('c', {5}, {0.540302, -0.416147, -0.989992, -0.653644, 0.283662});

    ASSERT_TRUE(x.isActualOnDeviceSide());
    ASSERT_FALSE(x.isActualOnHostSide());

    x.applyTransform(transform::Cosine, y);
    ASSERT_TRUE(exp.isSameShape(y));
    ASSERT_TRUE(exp.dataType() == y.dataType());
    ASSERT_TRUE(exp.equalsTo(y));
}

TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_3) {
    auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<double>('c', {5});
    auto exp = NDArrayFactory::create<double>({0.540302, -0.416147, -0.989992, -0.653644, 0.283662});

    ASSERT_TRUE(x.isActualOnDeviceSide());
    ASSERT_FALSE(x.isActualOnHostSide());

    x.applyTransform(transform::Cosine, y);
    ASSERT_TRUE(exp.isSameShape(y));
    ASSERT_TRUE(exp.equalsTo(y));
}

TEST_F(NDArrayCudaBasicsTests, TestRawBroadcast_2) {
    //if (!Environment::getInstance()->isExperimentalBuild())
    //    return;

    NDArray x = NDArrayFactory::create<double>('c', {2,3,4});
    NDArray y('c', {2,4}, {10,20,30,40,50,60,70,80}, nd4j::DataType::DOUBLE);
    NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
    NDArray exp('c', {2,3,4}, {10., 40., 90., 160., 50., 120., 210., 320., 90., 200., 330., 480., 650., 840., 1050., 1280., 850., 1080., 1330., 1600., 1050., 1320., 1610., 1920.}, nd4j::DataType::DOUBLE);
    x.linspace(1); x.syncToDevice();

    std::vector<int> dimensions = {0,2};

    // evaluate xTad data
    shape::TAD xTad;
    xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
    xTad.createTadOnlyShapeInfo();
    xTad.createOffsets();

    // prepare input arrays for prepareDataForCuda function
    std::vector<std::pair<void*,size_t>> hostData;
    hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int));                          // 0 -- dimensions
    hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));    // 1 -- xTadShapeInfo
    hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong));                            // 2 -- xTadOffsets
    std::vector<void*> devicePtrs(hostData.size(), nullptr);

    // create cuda stream and LaunchContext
    cudaError_t cudaResult;
    cudaStream_t stream;
    cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
    LaunchContext lc(&stream);

    // allocate required amount of global device memory and copy host data to it
    cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);

    // call cuda kernel which calculates result
    NativeOpExecutioner::execBroadcast(&lc, nd4j::broadcast::Multiply,
        nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
        nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
        nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
        (int*)devicePtrs[0], dimensions.size(),
        (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
        nullptr, nullptr);

    cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
    z.tickWriteDevice();

    // verify results
    for (int e = 0; e < z.lengthOf(); e++)
        ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);

    // free allocated global device memory
    for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);

    // delete cuda stream
    cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
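// What TestRawBroadcast_2 (above) and TestRawBroadcast_3 (below) stage into device
// memory: hostData packs (0) the broadcast dimensions, (1) the TAD-only shape info and
// (2) the TAD offsets that execBroadcast needs to walk x tensor-along-dimension-wise,
// and each blob is mirrored into devicePtrs before the kernel launch. As a worked check
// of the expected values: x is linspace(1) over shape {2,3,4} and y has shape {2,4}
// mapped onto x's axes {0,2}, so e.g. z[0,0,1] = x[0,0,1] * y[0,1] = 2 * 20 = 40,
// matching exp.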
TEST_F(NDArrayCudaBasicsTests, TestRawBroadcast_3) {
    //if (!Environment::getInstance()->isExperimentalBuild())
    //    return;

    NDArray x('c', {2,3,4}, nd4j::DataType::DOUBLE);
    NDArray y('c', {2,4}, {10,20,30,40,50,60,70,80}, nd4j::DataType::DOUBLE);
    NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
    NDArray exp('c', {2,3,4}, {10., 40., 90., 160., 50., 120., 210., 320., 90., 200., 330., 480., 650., 840., 1050., 1280., 850., 1080., 1330., 1600., 1050., 1320., 1610., 1920.}, nd4j::DataType::DOUBLE);
    x.linspace(1); x.syncToDevice();

    std::vector<int> dimensions = {0,2};

    // evaluate xTad data
    shape::TAD xTad;
    xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
    xTad.createTadOnlyShapeInfo();
    xTad.createOffsets();

    // prepare input arrays for prepareDataForCuda function
    std::vector<std::pair<void*,size_t>> hostData;
    hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int));                          // 0 -- dimensions
    hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));    // 1 -- xTadShapeInfo
    hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong));                            // 2 -- xTadOffsets
    std::vector<void*> devicePtrs(hostData.size(), nullptr);

    // reuse the stream and LaunchContext owned by x instead of creating a new stream
    cudaError_t cudaResult;
    LaunchContext* pLc = x.getContext();
    cudaStream_t* stream = pLc->getCudaStream();

    // allocate required amount of global device memory and copy host data to it
    for(int i = 0; i < devicePtrs.size(); ++i) {
        cudaResult = cudaMalloc(reinterpret_cast<void **>(&devicePtrs[i]), hostData[i].second); ASSERT_EQ(0, cudaResult);
        cudaMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, cudaMemcpyHostToDevice, *stream);
    }

    NDArray::registerSpecialUse({&z}, {&x, &y});

    // call cuda kernel which calculates result
    NativeOpExecutioner::execBroadcast(pLc, nd4j::broadcast::Multiply,
        nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
        nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
        nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
        (int*)devicePtrs[0], dimensions.size(),
        (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
        nullptr, nullptr);

    // verify results
    for (int e = 0; e < z.lengthOf(); e++)
        ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);

    // free allocated global device memory
    for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);

    ASSERT_TRUE(exp.equalsTo(z));
}
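// The allocateDeviceMem helper called in TestRawBroadcast_2 is defined elsewhere in the
// test sources; the function below is only a minimal sketch of what it presumably does,
// inferred from the inline loop in TestRawBroadcast_3 above (one cudaMalloc per staged
// host blob plus an asynchronous host-to-device copy on the context's stream). The name
// allocateDeviceMemSketch and the early-return error handling are illustrative
// assumptions, not part of the original suite.
static cudaError_t allocateDeviceMemSketch(LaunchContext& lc,
                                           std::vector<void*>& devicePtrs,
                                           const std::vector<std::pair<void*, size_t>>& hostData) {
    cudaStream_t* stream = lc.getCudaStream();
    for (size_t i = 0; i < hostData.size(); ++i) {
        // one device allocation per staged host blob
        cudaError_t err = cudaMalloc(&devicePtrs[i], hostData[i].second);
        if (err != cudaSuccess)
            return err;
        // async copy on the same stream the subsequent kernel launch uses,
        // so stream ordering makes an explicit sync before the launch unnecessary
        err = cudaMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second,
                              cudaMemcpyHostToDevice, *stream);
        if (err != cudaSuccess)
            return err;
    }
    return cudaSuccess;
}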
TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_1) {
    // allocating host-side arrays
    NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE);
    NDArray y = NDArrayFactory::create<double>(3.);
    auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 3, 6, 9, 12, 15, 18 });

    x *= y;

    ASSERT_TRUE(exp.equalsTo(x));
}

TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_01) {
    // allocating host-side arrays
    NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE);
    NDArray y = NDArrayFactory::create<double>(3.);
    auto z = NDArrayFactory::create<double>('c', { 2, 3 });
    auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 3, 6, 9, 12, 15, 18 });

    x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), y, z);

    ASSERT_TRUE(exp.equalsTo(z));
}

TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_02) {
    // allocating host-side arrays
    auto x = NDArrayFactory::create<double>('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6});
    auto y = NDArrayFactory::create<double>('c', {2,3}, {3, 3, 3, 3, 3, 3});
    auto z = NDArrayFactory::create<double>('c', { 2, 3 });
    auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 3, 6, 9, 12, 15, 18 });

    x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), y, z);

    ASSERT_TRUE(exp.equalsTo(z));
}

TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_002) {
    // allocating host-side arrays
    auto x = NDArrayFactory::create<double>('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6});
    auto y = NDArrayFactory::create<double>('c', {2, 3}, {2., 3., 3., 3., 3., 3.});
    auto z = NDArrayFactory::create<double>('c', { 2, 3 });
    auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 2, 6, 9, 12, 15, 18 });

    x.applyPairwiseTransform(pairwise::Multiply, y, z);

    ASSERT_TRUE(exp.equalsTo(z));
}

////////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestBroadcastRaw_1) {
    //if (!Environment::getInstance()->isExperimentalBuild())
    //    return;

    NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32);
    NDArray y('c', {3}, {10, 20, 30}, nd4j::DataType::INT64);
    NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32);
    NDArray exp('c', {2,3,4}, {10, 11, 12, 13, 24, 25, 26, 27, 38, 39, 40, 41, 22, 23, 24, 25, 36, 37, 38, 39, 50, 51, 52, 53}, nd4j::DataType::INT32);
    //real output [10, 11, 12, 13, 4, 5, 6, 7, 28, 29, 30, 31, 22, 23, 24, 25, 16, 17, 18, 19, 40, 41, 42, 43]

    x.linspace(0); x.syncToDevice();

    std::vector<int> dimensions = {1};

    // evaluate xTad data
    shape::TAD xTad;
    xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
    xTad.createTadOnlyShapeInfo();
    xTad.createOffsets();

    // prepare input arrays for prepareDataForCuda function
    std::vector<std::pair<void*,size_t>> hostData;
    hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(Nd4jLong));                     // 0 -- dimensions
    hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));    // 1 -- xTadShapeInfo
    hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong));                            // 2 -- xTadOffsets
    std::vector<void*> devicePtrs(hostData.size(), nullptr);

    // create cuda stream and LaunchContext
    cudaError_t cudaResult;
    cudaStream_t* stream = x.getContext()->getCudaStream();
    LaunchContext* pLc = x.getContext();

    // allocate required amount of global device memory and copy host data to it
    for(size_t i = 0; i < devicePtrs.size(); ++i) {
        cudaResult = cudaMalloc(&devicePtrs[i], hostData[i].second); ASSERT_EQ(cudaResult, 0);
        cudaMemcpy(devicePtrs[i], hostData[i].first, hostData[i].second, cudaMemcpyHostToDevice);
    }

    // call cuda kernel which calculates result
    NativeOpExecutioner::execBroadcast(pLc, nd4j::broadcast::Add,
        nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
        nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
        nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
        (int*)devicePtrs[0], dimensions.size(),
        (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
        nullptr, nullptr);

    cudaResult = cudaStreamSynchronize(*stream); ASSERT_EQ(0, cudaResult);
    // verify results (disabled in the original test; the noted "real output" above differs from exp)
    // for (int e = 0; e < z.lengthOf(); e++)
    //     ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);

    // free allocated global device memory
    for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
}

TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply) {
    // allocating host-side arrays
    NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE);
    NDArray y('c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE);
    auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 2, 6, 12, 8, 15, 24 });

    x *= y;

    // verification disabled in the original test:
    //for (int e = 0; e < x.lengthOf(); e++) {
    //    ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5);
    //}
}

TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_2) {
    // allocating host-side arrays
    NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE);
    NDArray y('c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE);
    auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 11,12, 13,14, 15, 16 });
    auto expZ = NDArrayFactory::create<double>('c', { 2, 3 }, { 2, 6, 12, 8, 15, 24 });

    //void NDArray::applyTrueBroadcast(nd4j::BroadcastOpsTuple op, const NDArray* other, NDArray* target, const bool checkTargetShape, ExtraArguments *extraArgs)
    x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), y, exp);

    ASSERT_TRUE(exp.equalsTo(expZ));
}
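// TestReduceSum_1 below is the scalar-reduction counterpart of the raw pairwise tests:
// it sums x = {1,2,3,4,5} on the device via execReduceSameScalar, synchronizes the
// context's stream, syncs y back to the host and checks the expected total 1+2+3+4+5 = 15.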
//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestReduceSum_1) {
    // allocating host-side arrays
    auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<double>(15);
    auto exp = NDArrayFactory::create<double>(15);

    auto stream = x.getContext()->getCudaStream();//reinterpret_cast<cudaStream_t *>(&nativeStream);

    NativeOpExecutioner::execReduceSameScalar(x.getContext(), reduce::Sum, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo());
    auto res = cudaStreamSynchronize(*stream);
    ASSERT_EQ(0, res);
    y.syncToHost();

    ASSERT_NEAR(y.e<double>(0), 15, 1e-5);
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestDup1) {
    NDArray array('c', {2,3}, {1,2,3,4,5,6});
    auto arrC = array.dup('c');
    auto arrF = array.dup('f');
    // arrC->printBuffer("arrC");
    // arrF->printBuffer("arrF");
    //arrC->printShapeInfo("C shape");
    //arrF->printShapeInfo("F shape");

    ASSERT_TRUE(array.equalsTo(arrF));
    ASSERT_TRUE(array.equalsTo(arrC));

    ASSERT_TRUE(arrF.equalsTo(arrC));
}

//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, equalsTo_1) {
    NDArray x('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE);
    NDArray y('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE);

    ASSERT_TRUE(x.equalsTo(y));

    x.permutei({1,0});
    y.permutei({1,0});

    ASSERT_TRUE(x.equalsTo(y));
}

//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, equalsTo_2) {
    NDArray x('c', {2,5}, {1,2,3,4,5,6,7,8,10,10}, nd4j::DataType::DOUBLE);
    NDArray y('c', {2,5}, {1,2,5,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE);

    ASSERT_FALSE(x.equalsTo(y));

    x.permutei({1,0});
    y.permutei({1,0});

    ASSERT_FALSE(x.equalsTo(y));
}

//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, equalsTo_3) {
    NDArray x('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE);
    NDArray y('c', {2,5}, {1.f,2.f,3.f,4.f,5.f,6.f,7.f,8.f,9.f,10.f}, nd4j::DataType::FLOAT32);

    ASSERT_FALSE(x.equalsTo(y));

    x.permutei({1,0});
    y.permutei({1,0});

    ASSERT_FALSE(x.equalsTo(y));
}

////////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, applyReduce3_1) {
    NDArray x('c', {2,3,4}, {-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::INT32);
    NDArray x2('c', {2,3,4}, {-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::INT32);
    NDArray y('c', {2,3,4}, {-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2,3,-4,5}, nd4j::DataType::INT32);
    NDArray k('c', {2,3}, {-2,3,-4,5,-2,3}, nd4j::DataType::INT32);
    NDArray k2('c', {3,2}, {-2,3,-4,5,-2,3}, nd4j::DataType::INT32);

    NDArray exp1('c', {3}, {4.f, 20.f, 36.f}, nd4j::DataType::FLOAT32);
    NDArray exp2('c', {2,3}, {-10.f, -2.f, 6.f,14.f, 22.f, 30.f}, nd4j::DataType::FLOAT32);
    NDArray exp3('c', {4}, {38.f, 41.f, 44.f, 47.f}, nd4j::DataType::FLOAT32);
    NDArray exp4('c', {4}, {114.f, 117.f, 120.f, 123.f}, nd4j::DataType::FLOAT32);

    NDArray z = x.applyReduce3(nd4j::reduce3::Dot, y, {0,2});
    ASSERT_TRUE(z.equalsTo(&exp1));

    z = x.applyReduce3(nd4j::reduce3::Dot, k, {0,1});
    ASSERT_TRUE(z.equalsTo(&exp3));

    x.permutei({0,2,1});
    y.permutei({0,2,1});

    z = y.applyReduce3(nd4j::reduce3::Dot, x, {1});
    ASSERT_TRUE(z.equalsTo(&exp2));

    x2.permutei({1,0,2});

    z = x2.applyReduce3(nd4j::reduce3::Dot, k2, {0,1});
    ASSERT_TRUE(z.equalsTo(&exp4));
}

////////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, applyReduce3_2) {
    NDArray x('c', {2,3,4}, {-10,-9,-8.5,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::DOUBLE);
    NDArray x2('c', {2,3,4}, {-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0.5,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::DOUBLE);
    NDArray y('c', {2,3,4}, {-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2.5,3,-4,5,-2,3,-4,5,-2,3,-4,5}, nd4j::DataType::DOUBLE);
    NDArray k('c', {2,3}, {-2,3,-4,5.5,-2,3},
nd4j::DataType::DOUBLE); NDArray k2('c', {3,2}, {-2,3,-4,5,-2,3.5}, nd4j::DataType::DOUBLE); NDArray exp1('c', {3}, {5., 20., 36.}, nd4j::DataType::DOUBLE); NDArray exp2('c', {2,3}, {-8., -2., 6., 13., 22., 30.}, nd4j::DataType::DOUBLE); NDArray exp3('c', {4}, {39., 42.5, 47., 49.5}, nd4j::DataType::DOUBLE); NDArray exp4('c', {4}, {119., 122.5, 125., 129.5}, nd4j::DataType::DOUBLE); NDArray z = x.applyReduce3(nd4j::reduce3::Dot, y, {0,2}); ASSERT_TRUE(z.equalsTo(&exp1)); z = x.applyReduce3(nd4j::reduce3::Dot, k, {0,1}); ASSERT_TRUE(z.equalsTo(&exp3)); x.permutei({0,2,1}); y.permutei({0,2,1}); z = y.applyReduce3(nd4j::reduce3::Dot, x, {1}); ASSERT_TRUE(z.equalsTo(&exp2)); x2.permutei({1,0,2}); z = x2.applyReduce3(nd4j::reduce3::Dot, k2, {0,1}); ASSERT_TRUE(z.equalsTo(&exp4)); } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyReduce3_3) { NDArray x1('c', {2,2,2}, {1,2,3,4,5,6,7,8}, nd4j::DataType::INT32); NDArray x2('c', {2,2,2}, {-1,-2,-3,-4,-5,-6,-7,-8}, nd4j::DataType::INT32); NDArray x3('c', {3,2}, {1.5,1.5,1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE); NDArray x4('c', {3,2}, {1,2,3,4,5,6}, nd4j::DataType::DOUBLE); NDArray exp1('c', {}, std::vector<double>{-204}, nd4j::DataType::FLOAT32); NDArray exp2('c', {}, std::vector<double>{31.5}, nd4j::DataType::DOUBLE); auto z = x1.applyReduce3(reduce3::Dot, x2); ASSERT_TRUE(z.equalsTo(&exp1)); z = x3.applyReduce3(reduce3::Dot, x4); ASSERT_TRUE(z.equalsTo(&exp2)); x1.permutei({2,1,0}); x2.permutei({2,1,0}); x3.permutei({1,0}); x4.permutei({1,0}); z = x1.applyReduce3(reduce3::Dot, x2); ASSERT_TRUE(z.equalsTo(&exp1)); z = x3.applyReduce3(reduce3::Dot, x4); ASSERT_TRUE(z.equalsTo(&exp2)); } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyAllReduce3_1) { NDArray x1('c', {2,3,2}, {1,2,3,4,5,6,7,8,-1,-2,-3,-4,}, nd4j::DataType::INT32); NDArray x2('c', {2,2,2}, {-1,-2,-3,-4,-5,-6,-7,-8}, nd4j::DataType::INT32); NDArray x3('c', {3,2}, {1.5,1.5,1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE); NDArray x4('c', {3,2}, {1,2,3,4,5,6}, nd4j::DataType::DOUBLE); NDArray exp1('c', {3,2}, {-88.f, -124.f, 6.f, -2.f, 22.f, 14.f}, nd4j::DataType::FLOAT32); NDArray exp2('c', {6,4}, {-36.f, -44.f, -52.f, -60.f,-42.f, -52.f, -62.f, -72.f, 2.f, 0.f, -2.f, -4.f, 6.f, 4.f, 2.f, 0.f, 10.f, 8.f, 6.f, 4.f, 14.f, 12.f, 10.f, 8.f}, nd4j::DataType::FLOAT32); NDArray exp3('c', {1,1}, std::vector<double>{31.5}, nd4j::DataType::DOUBLE); NDArray exp4('c', {3,3}, {4.5, 10.5, 16.5,4.5, 10.5, 16.5,4.5, 10.5, 16.5}, nd4j::DataType::DOUBLE); auto z = x1.applyAllReduce3(reduce3::Dot, x2, {0,2}); ASSERT_TRUE(z.equalsTo(&exp1)); z = x1.applyAllReduce3(reduce3::Dot, x2, {0}); ASSERT_TRUE(z.equalsTo(&exp2)); z = x3.applyAllReduce3(reduce3::Dot, x4, {0,1}); ASSERT_TRUE(z.equalsTo(&exp3)); z = x3.applyAllReduce3(reduce3::Dot, x4, {1}); ASSERT_TRUE(z.equalsTo(&exp4)); x1.permutei({2,1,0}); x2.permutei({2,1,0}); x3.permutei({1,0}); x4.permutei({1,0}); z = x1.applyAllReduce3(reduce3::Dot, x2, {0,2}); ASSERT_TRUE(z.equalsTo(&exp1)); z = x3.applyAllReduce3(reduce3::Dot, x4, {0}); ASSERT_TRUE(z.equalsTo(&exp4)); } ////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyIndexReduce_test1) { NDArray x('c', {2,3}, {0, 10, 1, 2, 2.5,-4}, nd4j::DataType::DOUBLE); NDArray scalar('c', {}, std::vector<double>{100}, nd4j::DataType::INT64); NDArray vec1('c', {2}, {100,100}, nd4j::DataType::INT64); NDArray vec2('c', {3}, {100,100,100}, 
nd4j::DataType::INT64); NDArray exp1('c', {}, std::vector<double>{1}, nd4j::DataType::INT64); NDArray exp2('c', {2}, {1,1}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {1,0,0}, nd4j::DataType::INT64); NDArray exp4('c', {}, std::vector<double>{2}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::INT64); NDArray exp6('c', {3}, {1,0,0}, nd4j::DataType::INT64); x.applyIndexReduce(nd4j::indexreduce::IndexMax, scalar, {0,1}); ASSERT_TRUE(scalar.equalsTo(&exp1)); x.applyIndexReduce(nd4j::indexreduce::IndexMax, vec1, {1}); ASSERT_TRUE(vec1.equalsTo(&exp2)); x.applyIndexReduce(nd4j::indexreduce::IndexMax, vec2, {0}); ASSERT_TRUE(vec2.equalsTo(&exp3)); x.permutei({1,0}); x.applyIndexReduce(nd4j::indexreduce::IndexMax, scalar, {0,1}); ASSERT_TRUE(scalar.equalsTo(&exp4)); x.applyIndexReduce(nd4j::indexreduce::IndexMax, vec1, {0}); ASSERT_TRUE(vec1.equalsTo(&exp5)); x.applyIndexReduce(nd4j::indexreduce::IndexMax, vec2, {1}); ASSERT_TRUE(vec2.equalsTo(&exp6)); } ////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyIndexReduce_test2) { NDArray x('c', {2,3}, {0, 10, 1, 2, 2.5,-4}, nd4j::DataType::DOUBLE); NDArray exp1('c', {}, std::vector<double>{1}, nd4j::DataType::INT64); NDArray exp2('c', {2}, {1,1}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {1,0,0}, nd4j::DataType::INT64); NDArray exp4('c', {}, std::vector<double>{2}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::INT64); NDArray exp6('c', {3}, {1,0,0}, nd4j::DataType::INT64); auto z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0,1}); ASSERT_TRUE(z.equalsTo(&exp1)); z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {1}); ASSERT_TRUE(z.equalsTo(&exp2)); z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0}); ASSERT_TRUE(z.equalsTo(&exp3)); x.permutei({1,0}); z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0,1}); ASSERT_TRUE(z.equalsTo(&exp4)); z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0}); ASSERT_TRUE(z.equalsTo(&exp5)); z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {1}); ASSERT_TRUE(z.equalsTo(&exp6)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_float_test1) { NDArray x('c', {2,3,2}, {1,2,3,4,5,6,7,8,-1,-2,-3,-4,}, nd4j::DataType::INT32); NDArray z1('c', {}, std::vector<double>{100}, nd4j::DataType::DOUBLE); NDArray z2('c', {2,2}, {100,100,100,100}, nd4j::DataType::FLOAT32); NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::DOUBLE); NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32); NDArray z5('c', {2}, {100,100}, nd4j::DataType::FLOAT32); NDArray exp1('c', {}, std::vector<double>{2.166667}, nd4j::DataType::DOUBLE); NDArray exp2('c', {2,2}, {3.f,4.f,1.f,0.666667f}, nd4j::DataType::FLOAT32); NDArray exp3('c', {3}, {4.5,1,1}, nd4j::DataType::DOUBLE); NDArray exp4('c', {3,2}, {4,5,1,1,1,1}, nd4j::DataType::FLOAT32); NDArray exp5('c', {2}, {3.5f,0.833333f}, nd4j::DataType::FLOAT32); x.reduceAlongDimension(nd4j::reduce::Mean, z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::Mean, z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(nd4j::reduce::Mean, z3, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 x.reduceAlongDimension(nd4j::reduce::Mean, z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::Mean, z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(nd4j::reduce::Mean, z5, 
{0,2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_float_test2) { NDArray x('c', {2,3,2}, {1,2,3,4,5,6,7,8,-1,-2,-3,-4,}, nd4j::DataType::DOUBLE); NDArray exp1('c', {}, std::vector<double>{2.166667}, nd4j::DataType::DOUBLE); NDArray exp2('c', {2,2}, {3,4,1,0.666667}, nd4j::DataType::DOUBLE); NDArray exp3('c', {3}, {4.5,1,1}, nd4j::DataType::DOUBLE); NDArray exp4('c', {3,2}, {4,5,1,1,1,1}, nd4j::DataType::DOUBLE); NDArray exp5('c', {2}, {3.5,0.833333}, nd4j::DataType::DOUBLE); NDArray z1 = x.reduceAlongDimension(nd4j::reduce::Mean, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDimension(nd4j::reduce::Mean, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDimension(nd4j::reduce::Mean, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 NDArray z4 = x.reduceAlongDimension(nd4j::reduce::Mean, {0,1,2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDimension(nd4j::reduce::Mean, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDimension(nd4j::reduce::Mean, {0,2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, EqualityTest1) { auto arrayA = NDArrayFactory::create_<float>('f', {3, 5}); auto arrayB = NDArrayFactory::create_<float>('f', {3, 5}); auto arrayC = NDArrayFactory::create_<float>('f', {3, 5}); auto arrayD = NDArrayFactory::create_<float>('f', {2, 4}); auto arrayE = NDArrayFactory::create_<float>('f', {1, 15}); for (int i = 0; i < arrayA->rows(); i++) { for (int k = 0; k < arrayA->columns(); k++) { arrayA->p(i, k, (float) i); } } for (int i = 0; i < arrayB->rows(); i++) { for (int k = 0; k < arrayB->columns(); k++) { arrayB->p(i, k, (float) i); } } for (int i = 0; i < arrayC->rows(); i++) { for (int k = 0; k < arrayC->columns(); k++) { arrayC->p(i, k, (float) i+1); } } ASSERT_TRUE(arrayA->equalsTo(arrayB, 1e-5)); ASSERT_FALSE(arrayC->equalsTo(arrayB, 1e-5)); ASSERT_FALSE(arrayD->equalsTo(arrayB, 1e-5)); ASSERT_FALSE(arrayE->equalsTo(arrayB, 1e-5)); delete arrayA; delete arrayB; delete arrayC; delete arrayD; delete arrayE; } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_same_test1) { NDArray x('c', {2,3,2}, {1.5f,2.f,3.f,4.f,5.f,6.f,7.5f,8.f,-1.f,-2.f,-3.5f,-4.f}, nd4j::DataType::FLOAT32); NDArray z1('c', {}, std::vector<double>{100}, nd4j::DataType::FLOAT32); NDArray z2('c', {2,2}, {100,100,100,100}, nd4j::DataType::FLOAT32); NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::FLOAT32); NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32); NDArray z5('c', {2}, {100,100}, nd4j::DataType::FLOAT32); NDArray exp1('c', {}, std::vector<double>{26.5f}, nd4j::DataType::FLOAT32); NDArray exp2('c', {2,2}, {9.5f,12.f,3.f,2.f}, nd4j::DataType::FLOAT32); NDArray exp3('c', {3}, {19.f,4.f,3.5f}, nd4j::DataType::FLOAT32); NDArray exp4('c', {3,2}, {9.f,10.f,2.f,2.f,1.5f,2.f}, nd4j::DataType::FLOAT32); NDArray exp5('c', {2}, {21.5f,5.f}, nd4j::DataType::FLOAT32); x.reduceAlongDimension(nd4j::reduce::Sum, z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::Sum, z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(nd4j::reduce::Sum, z3, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 x.reduceAlongDimension(nd4j::reduce::Sum, z1, {0,1,2}); 
ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::Sum, z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(nd4j::reduce::Sum, z5, {0,2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_same_test2) { NDArray x('c', {2,3,2}, {1.5,2,3,4,5,6,7.5,8,-1,-2,-3.5,-4,}, nd4j::DataType::INT64); NDArray exp1('c', {}, std::vector<double>{26}, nd4j::DataType::INT64); NDArray exp2('c', {2,2}, {9,12,3,2}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {18,4,4}, nd4j::DataType::INT64); NDArray exp4('c', {3,2}, {8,10,2,2,2,2}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {21,5}, nd4j::DataType::INT64); NDArray z1 = x.reduceAlongDimension(nd4j::reduce::Sum, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDimension(nd4j::reduce::Sum, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDimension(nd4j::reduce::Sum, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 NDArray z4 = x.reduceAlongDimension(nd4j::reduce::Sum, {0,1,2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDimension(nd4j::reduce::Sum, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDimension(nd4j::reduce::Sum, {0,2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_bool_test1) { NDArray x('c', {2,3,2}, {0.5,2,3,-4,5,6,-7.5,8,-1,-0.5,-3.5,4}, nd4j::DataType::DOUBLE); NDArray z1('c', {}, std::vector<double>{true}, nd4j::DataType::BOOL); NDArray z2('c', {2,2}, {true,true,true,true}, nd4j::DataType::BOOL); NDArray z3('c', {3}, {true,true,true}, nd4j::DataType::BOOL); NDArray z4('c', {3,2}, {true,true,true,true,true,true}, nd4j::DataType::BOOL); NDArray z5('c', {2}, {true,true}, nd4j::DataType::BOOL); NDArray exp1('c', {}, std::vector<double>{true}, nd4j::DataType::BOOL); NDArray exp2('c', {2,2}, {true,true,false,true}, nd4j::DataType::BOOL); NDArray exp3('c', {3}, {true,true,true}, nd4j::DataType::BOOL); NDArray exp4('c', {3,2}, {true,true,true,false,true,true}, nd4j::DataType::BOOL); NDArray exp5('c', {2}, {true,true}, nd4j::DataType::BOOL); x.reduceAlongDimension(nd4j::reduce::IsPositive, z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::IsPositive, z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(nd4j::reduce::IsPositive, z3, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 x.reduceAlongDimension(nd4j::reduce::IsPositive, z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::IsPositive, z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(nd4j::reduce::IsPositive, z5, {0,2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_bool_test2) { NDArray x('c', {2,3,2}, {0.5,2,3,-4,5,6,-7.5,8,-1,-0.5,-3.5,4}, nd4j::DataType::INT32); NDArray exp1('c', {}, std::vector<double>{1}, nd4j::DataType::BOOL); NDArray exp2('c', {2,2}, {1,1,0,1}, nd4j::DataType::BOOL); NDArray exp3('c', {3}, {1,1,1}, nd4j::DataType::BOOL); NDArray exp4('c', {3,2}, {0,1,1,0,1,1}, nd4j::DataType::BOOL); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::BOOL); NDArray z1 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = 
x.reduceAlongDimension(nd4j::reduce::IsPositive, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 NDArray z4 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {0,1,2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {0,2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_long_test1) { NDArray x('c', {2,3,2}, {0.5f,2.f,3.f,-0.f,5.f,6.f,-7.5f,0.f,-1.f,-0.5f,-3.5f,4.f}, nd4j::DataType::FLOAT32); NDArray z1('c', {}, std::vector<double>{100}, nd4j::DataType::INT64); NDArray z2('c', {2,2}, {100,100,100,100}, nd4j::DataType::INT64); NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::INT64); NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::INT64); NDArray z5('c', {2}, {100,100}, nd4j::DataType::INT64); NDArray exp1('c', {}, std::vector<double>{2}, nd4j::DataType::INT64); NDArray exp2('c', {2,2}, {0,1,0,1}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {1,1,0}, nd4j::DataType::INT64); NDArray exp4('c', {3,2}, {0,1,0,1,0,0}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::INT64); x.reduceAlongDimension(nd4j::reduce::CountZero, z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::CountZero, z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(nd4j::reduce::CountZero, z3, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 x.reduceAlongDimension(nd4j::reduce::CountZero, z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::CountZero, z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(nd4j::reduce::CountZero, z5, {0,2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_long_test2) { NDArray x('c', {2,3,2}, {0.5,2,3,-0,5,6,-7.5,0,-1,-0.5,-3.5,4}, nd4j::DataType::INT32); NDArray exp1('c', {}, std::vector<double>{4}, nd4j::DataType::INT64); NDArray exp2('c', {2,2}, {1,1,0,2}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {2,2,0}, nd4j::DataType::INT64); NDArray exp4('c', {3,2}, {1,1,0,2,0,0}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {2,2}, nd4j::DataType::INT64); NDArray z1 = x.reduceAlongDimension(nd4j::reduce::CountZero, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDimension(nd4j::reduce::CountZero, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDimension(nd4j::reduce::CountZero, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 NDArray z4 = x.reduceAlongDimension(nd4j::reduce::CountZero, {0,1,2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDimension(nd4j::reduce::CountZero, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDimension(nd4j::reduce::CountZero, {0,2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } TEST_F(NDArrayCudaBasicsTests, BroadcastOpsTest1) { auto x = NDArrayFactory::create<float>('c', {5, 5}); auto z = NDArrayFactory::create<float>('c', {5, 5}); auto row = NDArrayFactory::linspace(1.0f, 5.0f, 5); NDArray expRow('c', {1, 5,}, {1,2,3,4,5}, nd4j::DataType::FLOAT32); NDArray exp('c', {5,5}, {1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 
1, 2, 3, 4, 5}, nd4j::DataType::FLOAT32); ASSERT_TRUE(row->equalsTo(&expRow)); x.applyBroadcast(broadcast::Add, {1}, *row, z); x += *row; ASSERT_TRUE(x.equalsTo(z)); //ASSERT_TRUE(z.equalsTo(&exp)); delete row; } TEST_F(NDArrayCudaBasicsTests, BroadcastOpsTest2) { auto x = NDArrayFactory::create<float>('c', {5, 5}); //auto z = NDArrayFactory::create<float>('c', {5, 5}); auto row = NDArrayFactory::linspace(1.0f, 5.0f, 5); NDArray expRow('c', {1, 5,}, {1,2,3,4,5}, nd4j::DataType::FLOAT32); NDArray exp('c', {5,5}, {1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}, nd4j::DataType::FLOAT32); ASSERT_TRUE(row->equalsTo(&expRow)); x.applyBroadcast(broadcast::Add, {1}, *row, x); ASSERT_TRUE(x.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestBroadcast_1) { NDArray exp('c', {2, 3, 2, 2}, {1., 1., 1., 1., 2., 2., 2., 2., 3., 3., 3., 3., 1., 1., 1., 1., 2., 2., 2., 2., 3., 3., 3., 3.}, nd4j::DataType::DOUBLE); auto input = NDArrayFactory::create<double>('c',{ 2, 3, 2, 2}); auto bias = NDArrayFactory::create<double>('c', {1, 3}); bias.linspace(1); input.applyBroadcast(broadcast::Add, {1}, bias, input); ASSERT_TRUE(exp.equalsTo(&input)); } TEST_F(NDArrayCudaBasicsTests, TestFloat16_1) { auto x = NDArrayFactory::create<float>({1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<float>({1,2,3,4,5,7,8,9}); ASSERT_TRUE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat16_2) { auto x = NDArrayFactory::create<float16>('c', {9}, {1,2,3,4,5,6,7,8,9}); auto y = NDArrayFactory::create<float16>('c', {9}, {1,2,3,4,5,6,7,8,9}); ASSERT_TRUE(x.equalsTo(y)); //for (int e = 0; e < x.lengthOf(); e++) // ASSERT_NEAR(x.e<float16>(e), y.e<float16>(e), 1.e-5f); } TEST_F(NDArrayCudaBasicsTests, TestFloat16_3) { auto x = NDArrayFactory::create<bfloat16>({1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<bfloat16>({1,2,3,4,5,7,8,9}); ASSERT_TRUE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat_4) { auto x = NDArrayFactory::create<float>({1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<float>({2,4,5,5,6,7,8,9}); ASSERT_FALSE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat_5) { auto x = NDArrayFactory::create<float>('c', {3,3}, {1,2,3,4,5,6,7,8,9}); auto y = NDArrayFactory::create<float>('c', {3,3}, {2,4,5,5,6,7,8,9, 10}); ASSERT_FALSE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat_6) { auto x = NDArrayFactory::create<float>('f', {3,3}, {1,2,3,4,5,6,7,8,9}); auto y = NDArrayFactory::create<float>('f', {3,3}, {2,4,5,5,6,7,8,9,10}); ASSERT_FALSE(x.equalsTo(&y)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_05) { auto x = NDArrayFactory::create<float>('c', {8, 8, 8}); auto y = NDArrayFactory::create<float>('c', {1, 8, 8}); auto expected = NDArrayFactory::create<float>('c', {8, 8, 8}); NDArray res2 = NDArrayFactory::create<float>(expected.ordering(), expected.getShapeAsVector()); x = 1.; y = 2.; expected = 3.; res2 = 0.f; x.applyTrueBroadcast(BroadcastOpsTuple::Add(), y, res2);// *= y; ASSERT_TRUE(expected.isSameShape(&res2)); ASSERT_TRUE(expected.equalsTo(&res2)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_5) { auto x = NDArrayFactory::create<float>('c', {8, 8, 8}); auto y = NDArrayFactory::create<float>('c', {8, 1, 8}); auto expected = NDArrayFactory::create<float>('c', {8, 8, 8}); NDArray res2(expected); x = 1.; y = 2.; expected 
= 3.; //x.printBuffer("X="); //y.printBuffer("Y="); //expected.printBuffer("EXPECTED"); auto result = x + y; //result.printBuffer("1 + 2 ="); //res2.assign(x + y); //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2); //res2.printBuffer("Z="); //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2);// *= y; // x += y; //x.printBuffer("OutputX"); //res2.syncToHost(); //res2.printBuffer("OUputZ"); //x.printIndexedBuffer("OUtputX"); ASSERT_TRUE(expected.isSameShape(&result)); ASSERT_TRUE(expected.equalsTo(&result)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_51) { auto x = NDArrayFactory::create<float>('c', {8, 8, 8}); auto y = NDArrayFactory::create<float>('c', {8, 8}); auto expected = NDArrayFactory::create<float>('c', {8, 8, 8}); NDArray res2(expected); x = 1.; y = 2.; expected = 3.; //x.printBuffer("X="); //y.printBuffer("Y="); //expected.printBuffer("EXPECTED"); auto result = x + y; //result.printBuffer("1 + 2 ="); //res2.assign(x + y); //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2); //res2.printBuffer("Z="); //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2);// *= y; // x += y; //x.printBuffer("OutputX"); //res2.syncToHost(); //res2.printBuffer("OUputZ"); //x.printIndexedBuffer("OUtputX"); ASSERT_TRUE(expected.isSameShape(&result)); ASSERT_TRUE(expected.equalsTo(&result)); } TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_1) { auto x = NDArrayFactory::create<float>('c', {2, 1, 2}); x = 10.; auto y = x.tile({1,2,1}); auto exp = NDArrayFactory::create<float>('c', {2, 2, 2}); exp = 10.; // y.printShapeInfo("Output SHAPE"); // y.printBuffer("Output TILE"); // exp.printBuffer("Expect TILE"); ASSERT_TRUE(exp.equalsTo(y)); } TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_2) { auto x = NDArrayFactory::create<float>('f', {2, 1, 2}); x = 10.; auto y = x.tile({1,2,1}); auto exp = NDArrayFactory::create<float>('f', {2, 2, 2}); exp = 10.; ASSERT_TRUE(exp.equalsTo(y)); } TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_3) { auto x = NDArrayFactory::create<float>('f', {2, 1, 2}); x = 10.; x.p(1,0,1, 20); x.syncToDevice(); auto y = x.tile({1,2,1}); auto exp = NDArrayFactory::create<float>('f', {2, 2, 2}); exp = 10.; exp.p(1,0,1, 20.); exp.p(1, 1, 1, 20.); exp.syncToDevice(); ASSERT_TRUE(exp.equalsTo(y)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_2) { double expBuff[] = {2., 3, 3., 4., 4., 5, 5., 6., 6., 7, 7., 8.}; NDArray a('c', {4,4}, {1,2,3,4,5,6,7,8,9,2,3,2,1,0,4,7}, nd4j::DataType::FLOAT32); auto x = NDArrayFactory::create<double>('c', {3, 2, 1}); auto y = NDArrayFactory::create<double>('c', {1, 2}); auto expected = NDArrayFactory::create<double>(expBuff, 'c', {3, 2, 2}); x.linspace(1); y.linspace(1); auto result = x + y; ASSERT_TRUE(expected.isSameShape(&result)); ASSERT_TRUE(expected.equalsTo(&result)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, assign_2) { NDArray x('c', {4}, {1.5f,2.5f,3.5f,4.5f}, nd4j::DataType::FLOAT32); NDArray y('c', {4}, nd4j::DataType::INT32); NDArray expected('c', {4}, {1,2,3,4}, nd4j::DataType::INT32); y.assign(x); // y.printBuffer("ASSIGN VECTOR"); ASSERT_TRUE(expected.equalsTo(&y)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, subarray_1) { NDArray x('c', {2,3,4}, {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24}, nd4j::DataType::FLOAT32); NDArray y('f', 
{2,3,4}, {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24}, nd4j::DataType::FLOAT32); Nd4jLong shapeExpX0[] = {1, 2, 12, 8192, 1, 99}; float buffExpX0[] = {1.f, 13.f}; Nd4jLong shapeExpX1[] = {1, 2, 12, 8192, 1, 99}; float buffExpX1[] = {2.f, 14.f}; Nd4jLong shapeExpX2[] = {3, 2, 1, 1, 12, 4, 1, 8192, 1, 99}; float buffExpX2[] = {1.f, 13.f}; Nd4jLong shapeExpX3[] = {2, 2, 4, 12, 1, 8192, 1, 99}; float buffExpX3[] = {9.f, 10.f, 11.f, 12.f, 21.f, 22.f, 23.f, 24.f}; Nd4jLong shapeExpX4[] = {3, 2, 1, 4, 12, 4, 1, 8192, 1, 99}; float buffExpX4[] = {9.f, 10.f, 11.f, 12.f, 21.f, 22.f, 23.f, 24.f}; Nd4jLong shapeExpX5[] = {2, 2, 3, 12, 4, 8192, 1, 99}; float buffExpX5[] = {4.f, 8.f, 12.f, 16.f, 20.f, 24.f}; Nd4jLong shapeExpY0[] = {1, 2, 1, 8192, 1, 99}; float buffExpY0[] = {1.f, 2.f}; Nd4jLong shapeExpY1[] = {1, 2, 1, 8192, 1, 99}; float buffExpY1[] = {7.f, 8.f}; Nd4jLong shapeExpY2[] = {3, 2, 1, 1, 1, 2, 6, 8192, 1, 102}; float buffExpY2[] = {1.f, 2.f}; Nd4jLong shapeExpY3[] = {2, 2, 4, 1, 6, 8192, 1, 99}; float buffExpY3[] = {5.f, 11.f, 17.f, 23.f, 6.f, 12.f, 18.f, 24.f}; Nd4jLong shapeExpY4[] = {3, 2, 1, 4, 1, 2, 6, 8192, 1, 102}; float buffExpY4[] = {5.f, 11.f, 17.f, 23.f, 6.f, 12.f, 18.f, 24.f}; Nd4jLong shapeExpY5[] = {2, 2, 3, 1, 2, 8192, 1, 99}; float buffExpY5[] = {19.f, 21.f, 23.f, 20.f, 22.f, 24.f}; NDArray x0 = x(0, {1,2}); NDArray xExp(buffExpX0, shapeExpX0); ASSERT_TRUE(xExp.isSameShape(x0)); ASSERT_TRUE(xExp.equalsTo(x0)); // for(int i = 0; i < shape::shapeInfoLength(x0.rankOf()); ++i) // ASSERT_TRUE(x0.getShapeInfo()[i] == shapeExpX0[i]); // for(int i = 0; i < x0.lengthOf(); ++i) // ASSERT_TRUE(x0.e<float>(i) == buffExpX0[i]); NDArray x1 = x(1, {1,2}); NDArray x1Exp(buffExpX1, shapeExpX1); ASSERT_TRUE(x1Exp.isSameShape(x1)); ASSERT_TRUE(x1Exp.equalsTo(x1)); // for(int i = 0; i < shape::shapeInfoLength(x1.rankOf()); ++i) // ASSERT_TRUE(x1.getShapeInfo()[i] == shapeExpX1[i]); // for(int i = 0; i < x1.lengthOf(); ++i) // ASSERT_TRUE(x1.e<float>(i) == buffExpX1[i]); NDArray x2 = x(0, {1,2}, true); NDArray x2Exp(buffExpX2, shapeExpX2); ASSERT_TRUE(x2Exp.isSameShape(x2)); // x2.printBuffer("X2"); // x2Exp.printBuffer("X2 EXPECT"); ASSERT_TRUE(x2Exp.equalsTo(x2)); // for(int i = 0; i < shape::shapeInfoLength(x2.rankOf()); ++i) // ASSERT_TRUE(x2.getShapeInfo()[i] == shapeExpX2[i]); // for(int i = 0; i < x2.lengthOf(); ++i) // ASSERT_TRUE(x2.e<float>(i) == buffExpX2[i]); NDArray x3 = x(2, {1}); NDArray x3Exp(buffExpX3, shapeExpX3); ASSERT_TRUE(x3Exp.isSameShape(x3)); ASSERT_TRUE(x3Exp.equalsTo(x3)); // for(int i = 0; i < shape::shapeInfoLength(x3.rankOf()); ++i) // ASSERT_TRUE(x3.getShapeInfo()[i] == shapeExpX3[i]); // for(int i = 0; i < x3.lengthOf(); ++i) // ASSERT_TRUE(x3.e<float>(i) == buffExpX3[i]); NDArray x4 = x(2, {1}, true); NDArray x4Exp(buffExpX4, shapeExpX4); ASSERT_TRUE(x4Exp.isSameShape(x4)); ASSERT_TRUE(x4Exp.equalsTo(x4)); // for(int i = 0; i < shape::shapeInfoLength(x4.rankOf()); ++i) // ASSERT_TRUE(x4.getShapeInfo()[i] == shapeExpX4[i]); // for(int i = 0; i < x4.lengthOf(); ++i) // ASSERT_TRUE(x4.e<float>(i) == buffExpX4[i]); NDArray x5 = x(3, {2}); NDArray x5Exp(buffExpX5, shapeExpX5); ASSERT_TRUE(x5Exp.isSameShape(x5)); ASSERT_TRUE(x5Exp.equalsTo(x5)); // for(int i = 0; i < shape::shapeInfoLength(x5.rankOf()); ++i) // ASSERT_TRUE(x5.getShapeInfo()[i] == shapeExpX5[i]); // for(int i = 0; i < x5.lengthOf(); ++i) // ASSERT_TRUE(x5.e<float>(i) == buffExpX5[i]); // ******************* // NDArray y0 = y(0, {1,2}); NDArray y0Exp(buffExpY0, shapeExpY0); 
ASSERT_TRUE(y0Exp.isSameShape(y0)); ASSERT_TRUE(y0Exp.equalsTo(y0)); // for(int i = 0; i < shape::shapeInfoLength(y0.rankOf()); ++i) // ASSERT_TRUE(y0.getShapeInfo()[i] == shapeExpY0[i]); // for(int i = 0; i < y0.lengthOf(); ++i) // ASSERT_TRUE(y0.e<float>(i) == buffExpY0[i]); NDArray y1 = y(1, {1,2}); NDArray y1Exp(buffExpY1, shapeExpY1); ASSERT_TRUE(y1Exp.isSameShape(y1)); ASSERT_TRUE(y1Exp.equalsTo(y1)); // for(int i = 0; i < shape::shapeInfoLength(y1.rankOf()); ++i) // ASSERT_TRUE(y1.getShapeInfo()[i] == shapeExpY1[i]); // for(int i = 0; i < y1.lengthOf(); ++i) // ASSERT_TRUE(y1.e<float>(i) == buffExpY1[i]); NDArray y2 = y(0, {1,2}, true); NDArray y2Exp(buffExpY2, shapeExpY2); ASSERT_TRUE(y2Exp.isSameShape(y2)); ASSERT_TRUE(y2Exp.equalsTo(y2)); // for(int i = 0; i < shape::shapeInfoLength(y2.rankOf()); ++i) // ASSERT_TRUE(y2.getShapeInfo()[i] == shapeExpY2[i]); // for(int i = 0; i < y2.lengthOf(); ++i) // ASSERT_TRUE(y2.e<float>(i) == buffExpY2[i]); NDArray y3 = y(2, {1}); NDArray y3Exp(buffExpY3, shapeExpY3); ASSERT_TRUE(y3Exp.isSameShape(y3)); ASSERT_TRUE(y3Exp.equalsTo(y3)); // for(int i = 0; i < shape::shapeInfoLength(y3.rankOf()); ++i) // ASSERT_TRUE(y3.getShapeInfo()[i] == shapeExpY3[i]); // for(int i = 0; i < y3.lengthOf(); ++i) // ASSERT_TRUE(y3.e<float>(i) == buffExpY3[i]); NDArray y4 = y(2, {1}, true); NDArray y4Exp = NDArrayFactory::create<float>('f', {2,1,4}, {5, 6, 11, 12, 17, 18, 23, 24}); ASSERT_TRUE(y4Exp.isSameShape(y4)); ASSERT_TRUE(y4Exp.equalsTo(y4)); // for(int i = 0; i < shape::shapeInfoLength(y4.rankOf()); ++i) // ASSERT_TRUE(y4.getShapeInfo()[i] == shapeExpY4[i]); // for(int i = 0; i < y4.lengthOf(); ++i) // ASSERT_TRUE(y4.e<float>(i) == buffExpY4[i]); NDArray y5 = y(3, {2}); NDArray y5Exp(buffExpY5, shapeExpY5); ASSERT_TRUE(y5Exp.isSameShape(y5)); ASSERT_TRUE(y5Exp.equalsTo(y5)); // for(int i = 0; i < shape::shapeInfoLength(y5.rankOf()); ++i) // ASSERT_TRUE(y5.getShapeInfo()[i] == shapeExpY5[i]); // for(int i = 0; i < y5.lengthOf(); ++i) // ASSERT_TRUE(y5.e<float>(i) == buffExpY5[i]); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Test_diagonal_1) { auto x = NDArrayFactory::create<float>('c', {2, 3}, {1, 2, 3, 4, 5, 6}); auto exp = NDArrayFactory::create<float>('c', {2, 1}, {1, 5}); auto diag = x.diagonal('c'); //diag.syncToDevice(); for (Nd4jLong e = 0; e < exp.lengthOf(); ++e) { printf("VAL[%ld] = %f\n", e, diag.e<float>(e)); //, exp.e<float>(e), 1.e-5); } for (Nd4jLong e = 0; e < exp.lengthOf(); ++e) { ASSERT_NEAR(diag.e<float>(e), exp.e<float>(e), 1.e-5); } double eps(1.e-5); NDArray tmp(nd4j::DataType::FLOAT32, x.getContext()); // scalar = 0 ExtraArguments extras({eps}); NativeOpExecutioner::execReduce3Scalar(diag.getContext(), reduce3::EqualsWithEps, diag.getBuffer(), diag.getShapeInfo(), diag.getSpecialBuffer(), diag.getSpecialShapeInfo(), extras.argumentsAsT(nd4j::DataType::FLOAT32), exp.getBuffer(), exp.getShapeInfo(), exp.getSpecialBuffer(), exp.getSpecialShapeInfo(), tmp.buffer(), tmp.shapeInfo(), tmp.specialBuffer(), tmp.specialShapeInfo()); cudaStream_t* stream = x.getContext()->getCudaStream(); auto res = cudaStreamSynchronize(*stream); // tmp.printBuffer("Compare result is (expected 0)"); ASSERT_TRUE(exp.isSameShape(diag)); ASSERT_TRUE(exp.equalsTo(diag)); } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_02) { auto x = NDArrayFactory::linspace<float>(1.f, 60.f, 60); //('c', {1, 60}); //x.linspace(1); auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 
5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); x->reshapei('c', {3, 4, 5}); x->permutei({0, 1, 2}); x->streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(x)); ASSERT_TRUE(exp.equalsTo(x)); delete x; } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_0) { auto x = NDArrayFactory::create<float>('c', {1, 60}); x.linspace(1); auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); x.reshapei('c', {3, 4, 5}); x.permutei({0, 1, 2}); x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(&x)); ASSERT_TRUE(exp.equalsTo(&x)); } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_1) { auto x = NDArrayFactory::create<float>('c', {1, 60}); x.linspace(1); auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); x.reshapei('c', {3, 4, 5}); x.permutei({0, 1, 2}); x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(&x)); ASSERT_TRUE(exp.equalsTo(&x)); } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_2) { //auto x = NDArrayFactory::create<float>('c', {1, 60}); auto xx = NDArrayFactory::linspace<float>(1.f, 60.f, 60); //('c', {1, 60}); // auto x = *xx; //x.linspace(1); // auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); // x.reshapei('c', {3, 4, 5}); // x.permutei({0, 1, 2}); // x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); // ASSERT_TRUE(exp.isSameShape(&x)); // ASSERT_TRUE(exp.equalsTo(&x)); delete xx; } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_3) { auto x = NDArrayFactory::create<float>('c', {1, 60}); //x.linspace(1); for (int l = 0; l < x.lengthOf(); l++) x.p(l, float(l + 1.f)); auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 
24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); x.reshapei('c', {3, 4, 5}); x.permutei({0, 1, 2}); x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(&x)); ASSERT_TRUE(exp.equalsTo(&x)); } TEST_F(NDArrayCudaBasicsTests, Test_Empty_1) { auto x = NDArrayFactory::empty<float>(); ASSERT_TRUE(x.isActualOnHostSide()); ASSERT_TRUE(x.isEmpty()); } TEST_F(NDArrayCudaBasicsTests, Test_Empty_2) { auto x = NDArrayFactory::empty_<float>(); ASSERT_TRUE(x->isEmpty()); delete x; } TEST_F(NDArrayCudaBasicsTests, Test_Empty_3) { auto x = NDArrayFactory::empty(nd4j::DataType::FLOAT32); ASSERT_TRUE(x.isEmpty()); } TEST_F(NDArrayCudaBasicsTests, Test_Empty_4) { auto x = NDArrayFactory::empty_(nd4j::DataType::FLOAT32); ASSERT_TRUE(x->isEmpty()); delete x; }
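The GTest cases above all follow the same create/operate/compare pattern (build operands with NDArrayFactory, run the op on the CUDA device, then check shape and values); a minimal sketch of that pattern, assuming the same fixture and headers declared earlier in this test file:

// Sketch only - not one of the original test cases; the fixture name and factory/assert
// calls are taken from the tests above.
TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Sketch) {
    auto x        = NDArrayFactory::create<float>('c', {4, 4});
    auto y        = NDArrayFactory::create<float>('c', {4, 4});
    auto expected = NDArrayFactory::create<float>('c', {4, 4});

    x = 1.;          // fill with a constant, as the tests above do
    y = 2.;
    expected = 3.;

    auto result = x + y;                         // elementwise add, executed on the device

    ASSERT_TRUE(expected.isSameShape(&result));  // shape first
    ASSERT_TRUE(expected.equalsTo(&result));     // then contents
}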
00b94b246a73b6c547bd37301161caec3b0fe60c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// /// Copyright (c) 2013, Intel Corporation /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of Intel Corporation nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. ////////////////////////////////////////////////////////////////////// /// /// NAME: Stencil /// /// PURPOSE: This program tests the efficiency with which a space-invariant, /// linear, symmetric filter (stencil) can be applied to a square /// grid or image. /// /// USAGE: The program takes as input the linear /// dimension of the grid, and the number of iterations on the grid /// /// <progname> <iterations> <grid size> /// /// The output consists of diagnostics to make sure the /// algorithm worked, and of timing statistics. /// /// FUNCTIONS CALLED: /// /// Other than standard C functions, the following functions are used in /// this program: /// wtime() /// /// HISTORY: - Written by Rob Van der Wijngaart, February 2009. /// - RvdW: Removed unrolling pragmas for clarity; /// added constant to array "in" at end of each iteration to force /// refreshing of neighbor data in parallel versions; August 2013 /// C++11-ification by Jeff Hammond, May 2017. /// ////////////////////////////////////////////////////////////////////// #include "prk_util.h" #include "prk_cuda.h" #include "stencil_cuda.hpp" __global__ void nothing(const int n, const prk_float * in, prk_float * out) { //printf("You are trying to use a stencil that does not exist.\n"); //printf("Please generate the new stencil using the code generator.\n"); // n will never be zero - this is to silence compiler warnings. 
//if (n==0) printf("in=%p out=%p\n", in, out); //abort(); } __global__ void add(const int n, prk_float * in) { auto i = blockIdx.x * blockDim.x + threadIdx.x; auto j = blockIdx.y * blockDim.y + threadIdx.y; if ((i<n) && (j<n)) { in[i*n+j] += (prk_float)1; } } int main(int argc, char* argv[]) { std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl; std::cout << "C++11/CUDA Stencil execution on 2D grid" << std::endl; prk::CUDA::info info; info.print(); ////////////////////////////////////////////////////////////////////// // Process and test input parameters ////////////////////////////////////////////////////////////////////// int iterations, n, radius, tile_size; bool star = true; try { if (argc < 3) { throw "Usage: <# iterations> <array dimension> [<tile_size> <star/grid> <radius>]"; } // number of times to run the algorithm iterations = std::atoi(argv[1]); if (iterations < 1) { throw "ERROR: iterations must be >= 1"; } // linear grid dimension n = std::atoi(argv[2]); if (n < 1) { throw "ERROR: grid dimension must be positive"; } else if (n > prk::get_max_matrix_size()) { throw "ERROR: grid dimension too large - overflow risk"; } // default tile size for tiling of local transpose tile_size = 32; if (argc > 3) { tile_size = std::atoi(argv[3]); if (tile_size <= 0) tile_size = n; if (tile_size > n) tile_size = n; if (tile_size > 32) { std::cout << "Warning: tile_size > 32 may lead to incorrect results (observed for CUDA 9.0 on GV100).\n"; } } // stencil pattern if (argc > 4) { auto stencil = std::string(argv[4]); auto grid = std::string("grid"); star = (stencil == grid) ? false : true; } // stencil radius radius = 2; if (argc > 5) { radius = std::atoi(argv[5]); } if ( (radius < 1) || (2*radius+1 > n) ) { throw "ERROR: Stencil radius negative or too large"; } } catch (const char * e) { std::cout << e << std::endl; return 1; } std::cout << "Number of iterations = " << iterations << std::endl; std::cout << "Grid size = " << n << std::endl; std::cout << "Tile size = " << tile_size << std::endl; std::cout << "Type of stencil = " << (star ? 
"star" : "grid") << std::endl; std::cout << "Radius of stencil = " << radius << std::endl; auto stencil = nothing; if (star) { switch (radius) { case 1: stencil = star1; break; case 2: stencil = star2; break; case 3: stencil = star3; break; case 4: stencil = star4; break; case 5: stencil = star5; break; } } else { switch (radius) { case 1: stencil = grid1; break; case 2: stencil = grid2; break; case 3: stencil = grid3; break; case 4: stencil = grid4; break; case 5: stencil = grid5; break; } } dim3 dimGrid(prk::divceil(n,tile_size),prk::divceil(n,tile_size),1); dim3 dimBlock(tile_size, tile_size, 1); info.checkDims(dimBlock, dimGrid); ////////////////////////////////////////////////////////////////////// // Allocate space and perform the computation ////////////////////////////////////////////////////////////////////// double stencil_time{0}; const size_t nelems = (size_t)n * (size_t)n; const size_t bytes = nelems * sizeof(prk_float); prk_float * h_in; prk_float * h_out; prk::CUDA::check( hipHostMalloc((void**)&h_in, bytes) ); prk::CUDA::check( hipHostMalloc((void**)&h_out, bytes) ); for (int i=0; i<n; i++) { for (int j=0; j<n; j++) { h_in[i*n+j] = static_cast<prk_float>(i+j); h_out[i*n+j] = static_cast<prk_float>(0); } } // copy input from host to device prk_float * d_in; prk_float * d_out; prk::CUDA::check( hipMalloc((void**)&d_in, bytes) ); prk::CUDA::check( hipMalloc((void**)&d_out, bytes) ); prk::CUDA::check( hipMemcpy(d_in, &(h_in[0]), bytes, hipMemcpyHostToDevice) ); prk::CUDA::check( hipMemcpy(d_out, &(h_out[0]), bytes, hipMemcpyHostToDevice) ); for (int iter = 0; iter<=iterations; iter++) { if (iter==1) stencil_time = prk::wtime(); // Apply the stencil operator hipLaunchKernelGGL(( stencil), dim3(dimGrid), dim3(dimBlock), 0, 0, n, d_in, d_out); // Add constant to solution to force refresh of neighbor data, if any hipLaunchKernelGGL(( add), dim3(dimGrid), dim3(dimBlock), 0, 0, n, d_in); prk::CUDA::check( hipDeviceSynchronize() ); } stencil_time = prk::wtime() - stencil_time; // copy output back to host prk::CUDA::check( hipMemcpy(&(h_out[0]), d_out, bytes, hipMemcpyDeviceToHost) ); #ifdef VERBOSE // copy input back to host - debug only prk::CUDA::check( hipMemcpy(&(h_in[0]), d_in, bytes, hipMemcpyDeviceToHost) ); #endif prk::CUDA::check( hipFree(d_out) ); prk::CUDA::check( hipFree(d_in) ); ////////////////////////////////////////////////////////////////////// // Analyze and output results. ////////////////////////////////////////////////////////////////////// // interior of grid with respect to stencil size_t active_points = static_cast<size_t>(n-2*radius)*static_cast<size_t>(n-2*radius); double norm = 0.0; for (int i=radius; i<n-radius; i++) { for (int j=radius; j<n-radius; j++) { norm += prk::abs(h_out[i*n+j]); } } norm /= active_points; // verify correctness const double epsilon = 1.0e-8; double reference_norm = 2.*(iterations+1.); if (prk::abs(norm-reference_norm) > epsilon) { std::cout << "ERROR: L1 norm = " << norm << " Reference L1 norm = " << reference_norm << std::endl; return 1; } else { std::cout << "Solution validates" << std::endl; #ifdef VERBOSE std::cout << "L1 norm = " << norm << " Reference L1 norm = " << reference_norm << std::endl; #endif const int stencil_size = star ? 4*radius+1 : (2*radius+1)*(2*radius+1); size_t flops = (2L*(size_t)stencil_size+1L) * active_points; auto avgtime = stencil_time/iterations; std::cout << "Rate (MFlops/s): " << 1.0e-6 * static_cast<double>(flops)/avgtime << " Avg time (s): " << avgtime << std::endl; } return 0; }
00b94b246a73b6c547bd37301161caec3b0fe60c.cu
/// /// Copyright (c) 2013, Intel Corporation /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of Intel Corporation nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. ////////////////////////////////////////////////////////////////////// /// /// NAME: Stencil /// /// PURPOSE: This program tests the efficiency with which a space-invariant, /// linear, symmetric filter (stencil) can be applied to a square /// grid or image. /// /// USAGE: The program takes as input the linear /// dimension of the grid, and the number of iterations on the grid /// /// <progname> <iterations> <grid size> /// /// The output consists of diagnostics to make sure the /// algorithm worked, and of timing statistics. /// /// FUNCTIONS CALLED: /// /// Other than standard C functions, the following functions are used in /// this program: /// wtime() /// /// HISTORY: - Written by Rob Van der Wijngaart, February 2009. /// - RvdW: Removed unrolling pragmas for clarity; /// added constant to array "in" at end of each iteration to force /// refreshing of neighbor data in parallel versions; August 2013 /// C++11-ification by Jeff Hammond, May 2017. /// ////////////////////////////////////////////////////////////////////// #include "prk_util.h" #include "prk_cuda.h" #include "stencil_cuda.hpp" __global__ void nothing(const int n, const prk_float * in, prk_float * out) { //printf("You are trying to use a stencil that does not exist.\n"); //printf("Please generate the new stencil using the code generator.\n"); // n will never be zero - this is to silence compiler warnings. 
//if (n==0) printf("in=%p out=%p\n", in, out); //abort(); } __global__ void add(const int n, prk_float * in) { auto i = blockIdx.x * blockDim.x + threadIdx.x; auto j = blockIdx.y * blockDim.y + threadIdx.y; if ((i<n) && (j<n)) { in[i*n+j] += (prk_float)1; } } int main(int argc, char* argv[]) { std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl; std::cout << "C++11/CUDA Stencil execution on 2D grid" << std::endl; prk::CUDA::info info; info.print(); ////////////////////////////////////////////////////////////////////// // Process and test input parameters ////////////////////////////////////////////////////////////////////// int iterations, n, radius, tile_size; bool star = true; try { if (argc < 3) { throw "Usage: <# iterations> <array dimension> [<tile_size> <star/grid> <radius>]"; } // number of times to run the algorithm iterations = std::atoi(argv[1]); if (iterations < 1) { throw "ERROR: iterations must be >= 1"; } // linear grid dimension n = std::atoi(argv[2]); if (n < 1) { throw "ERROR: grid dimension must be positive"; } else if (n > prk::get_max_matrix_size()) { throw "ERROR: grid dimension too large - overflow risk"; } // default tile size for tiling of local transpose tile_size = 32; if (argc > 3) { tile_size = std::atoi(argv[3]); if (tile_size <= 0) tile_size = n; if (tile_size > n) tile_size = n; if (tile_size > 32) { std::cout << "Warning: tile_size > 32 may lead to incorrect results (observed for CUDA 9.0 on GV100).\n"; } } // stencil pattern if (argc > 4) { auto stencil = std::string(argv[4]); auto grid = std::string("grid"); star = (stencil == grid) ? false : true; } // stencil radius radius = 2; if (argc > 5) { radius = std::atoi(argv[5]); } if ( (radius < 1) || (2*radius+1 > n) ) { throw "ERROR: Stencil radius negative or too large"; } } catch (const char * e) { std::cout << e << std::endl; return 1; } std::cout << "Number of iterations = " << iterations << std::endl; std::cout << "Grid size = " << n << std::endl; std::cout << "Tile size = " << tile_size << std::endl; std::cout << "Type of stencil = " << (star ? 
"star" : "grid") << std::endl; std::cout << "Radius of stencil = " << radius << std::endl; auto stencil = nothing; if (star) { switch (radius) { case 1: stencil = star1; break; case 2: stencil = star2; break; case 3: stencil = star3; break; case 4: stencil = star4; break; case 5: stencil = star5; break; } } else { switch (radius) { case 1: stencil = grid1; break; case 2: stencil = grid2; break; case 3: stencil = grid3; break; case 4: stencil = grid4; break; case 5: stencil = grid5; break; } } dim3 dimGrid(prk::divceil(n,tile_size),prk::divceil(n,tile_size),1); dim3 dimBlock(tile_size, tile_size, 1); info.checkDims(dimBlock, dimGrid); ////////////////////////////////////////////////////////////////////// // Allocate space and perform the computation ////////////////////////////////////////////////////////////////////// double stencil_time{0}; const size_t nelems = (size_t)n * (size_t)n; const size_t bytes = nelems * sizeof(prk_float); prk_float * h_in; prk_float * h_out; prk::CUDA::check( cudaMallocHost((void**)&h_in, bytes) ); prk::CUDA::check( cudaMallocHost((void**)&h_out, bytes) ); for (int i=0; i<n; i++) { for (int j=0; j<n; j++) { h_in[i*n+j] = static_cast<prk_float>(i+j); h_out[i*n+j] = static_cast<prk_float>(0); } } // copy input from host to device prk_float * d_in; prk_float * d_out; prk::CUDA::check( cudaMalloc((void**)&d_in, bytes) ); prk::CUDA::check( cudaMalloc((void**)&d_out, bytes) ); prk::CUDA::check( cudaMemcpy(d_in, &(h_in[0]), bytes, cudaMemcpyHostToDevice) ); prk::CUDA::check( cudaMemcpy(d_out, &(h_out[0]), bytes, cudaMemcpyHostToDevice) ); for (int iter = 0; iter<=iterations; iter++) { if (iter==1) stencil_time = prk::wtime(); // Apply the stencil operator stencil<<<dimGrid, dimBlock>>>(n, d_in, d_out); // Add constant to solution to force refresh of neighbor data, if any add<<<dimGrid, dimBlock>>>(n, d_in); prk::CUDA::check( cudaDeviceSynchronize() ); } stencil_time = prk::wtime() - stencil_time; // copy output back to host prk::CUDA::check( cudaMemcpy(&(h_out[0]), d_out, bytes, cudaMemcpyDeviceToHost) ); #ifdef VERBOSE // copy input back to host - debug only prk::CUDA::check( cudaMemcpy(&(h_in[0]), d_in, bytes, cudaMemcpyDeviceToHost) ); #endif prk::CUDA::check( cudaFree(d_out) ); prk::CUDA::check( cudaFree(d_in) ); ////////////////////////////////////////////////////////////////////// // Analyze and output results. ////////////////////////////////////////////////////////////////////// // interior of grid with respect to stencil size_t active_points = static_cast<size_t>(n-2*radius)*static_cast<size_t>(n-2*radius); double norm = 0.0; for (int i=radius; i<n-radius; i++) { for (int j=radius; j<n-radius; j++) { norm += prk::abs(h_out[i*n+j]); } } norm /= active_points; // verify correctness const double epsilon = 1.0e-8; double reference_norm = 2.*(iterations+1.); if (prk::abs(norm-reference_norm) > epsilon) { std::cout << "ERROR: L1 norm = " << norm << " Reference L1 norm = " << reference_norm << std::endl; return 1; } else { std::cout << "Solution validates" << std::endl; #ifdef VERBOSE std::cout << "L1 norm = " << norm << " Reference L1 norm = " << reference_norm << std::endl; #endif const int stencil_size = star ? 4*radius+1 : (2*radius+1)*(2*radius+1); size_t flops = (2L*(size_t)stencil_size+1L) * active_points; auto avgtime = stencil_time/iterations; std::cout << "Rate (MFlops/s): " << 1.0e-6 * static_cast<double>(flops)/avgtime << " Avg time (s): " << avgtime << std::endl; } return 0; }
e9294ef0cfb2f212b3d0dcd5f3c62b70db053538.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cstdint> #include <ctime> #define FIRSTBIT 0x8000000000000000 #define BLOCK_SIZE 1024 #define BLOCKS 2048 #define KNOWN_ZEROS 36 #define MSGLEN 1 __device__ int work = 1; // Host matrices: const int PC1[56] = { 57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36, 63, 55, 47, 39, 31, 23, 15, 7, 62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37, 29, 21, 13, 5, 28, 20, 12, 4 }; const int Rotations[16] = { 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1 }; const int PC2[48] = { 14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32 }; const int InitialPermutation[64] = { 58, 50, 42, 34, 26, 18, 10, 2, 60, 52, 44, 36, 28, 20, 12, 4, 62, 54, 46, 38, 30, 22, 14, 6, 64, 56, 48, 40, 32, 24, 16, 8, 57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3, 61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7 }; const int FinalPermutation[64] = { 40, 8, 48, 16, 56, 24, 64, 32, 39, 7, 47, 15, 55, 23, 63, 31, 38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29, 36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27, 34, 2, 42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25 }; const int DesExpansion[48] = { 32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9, 8, 9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17, 16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25, 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1 }; const int DesSbox[8][4][16] = { { { 14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7 }, { 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8 }, { 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0 }, { 15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13 }, }, { { 15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10 }, { 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5 }, { 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15 }, { 13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9 }, }, { { 10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8 }, { 13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1 }, { 13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7 }, { 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12 }, }, { { 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15 }, { 13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9 }, { 10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4 }, { 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14 }, }, { { 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9 }, { 14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6 }, { 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14 }, { 11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3 }, }, { { 12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11 }, { 10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8 }, { 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6 }, { 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13 }, }, { { 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1 }, { 13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6 }, { 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2 }, { 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12 }, }, { { 13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7 }, { 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2 }, { 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8 }, { 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 
9, 0, 3, 5, 6, 11 }, }, }; const int Pbox[32] = { 16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25 }; // Device matrices: __constant__ int d_PC1[56] = { 57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36, 63, 55, 47, 39, 31, 23, 15, 7, 62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37, 29, 21, 13, 5, 28, 20, 12, 4 }; __constant__ int d_Rotations[16] = { 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1 }; __constant__ int d_PC2[48] = { 14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32 }; __constant__ int d_InitialPermutation[64] = { 58, 50, 42, 34, 26, 18, 10, 2, 60, 52, 44, 36, 28, 20, 12, 4, 62, 54, 46, 38, 30, 22, 14, 6, 64, 56, 48, 40, 32, 24, 16, 8, 57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3, 61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7 }; __constant__ int d_FinalPermutation[64] = { 40, 8, 48, 16, 56, 24, 64, 32, 39, 7, 47, 15, 55, 23, 63, 31, 38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29, 36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27, 34, 2, 42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25 }; __constant__ int d_DesExpansion[48] = { 32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9, 8, 9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17, 16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25, 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1 }; __constant__ int d_DesSbox[8][4][16] = { { { 14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7 }, { 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8 }, { 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0 }, { 15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13 }, }, { { 15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10 }, { 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5 }, { 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15 }, { 13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9 }, }, { { 10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8 }, { 13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1 }, { 13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7 }, { 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12 }, }, { { 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15 }, { 13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9 }, { 10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4 }, { 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14 }, }, { { 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9 }, { 14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6 }, { 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14 }, { 11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3 }, }, { { 12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11 }, { 10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8 }, { 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6 }, { 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13 }, }, { { 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1 }, { 13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6 }, { 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2 }, { 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12 }, }, { { 13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7 }, { 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2 }, { 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8 }, { 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11 }, }, }; __constant__ int d_Pbox[32] = { 16, 7, 20, 21, 29, 12, 28, 17, 
1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25 }; /* * Generates 16 keys (saved in generating order when encrypting or reverse order when decrypting) and saves them into keys[] array */ __device__ __host__ void generate_keys(uint64_t basekey, bool reverse, const int PC1[], const int PC2[], const int Rotations[], uint64_t keys[]) { uint64_t first = 0; for (int i = 0; i < 56; i++) { if (basekey & ((uint64_t)1 << (63 - (PC1[i] - 1)))) first += ((uint64_t)1 << 63 - i); } uint64_t d[17]; uint64_t c[17]; const uint64_t mask = 0b0000000000000000000000000000111111111111111111111111111100000000; d[0] = (first & mask) << 28; //right half c[0] = ((first >> 28) & mask) << 28; //left half for (int i = 1; i <= 16; i++) { int shifts = Rotations[i - 1]; c[i] = c[i - 1] << shifts; d[i] = d[i - 1] << shifts; if (c[i - 1] & (uint64_t)1 << 63) c[i] += (uint64_t)1 << 35 + shifts; if (shifts == 2) if (c[i - 1] & (uint64_t)1 << 62) c[i] += (uint64_t)1 << 36; if (d[i - 1] & (uint64_t)1 << 63) d[i] += (uint64_t)1 << 35 + shifts; if (shifts == 2) if (d[i - 1] & (uint64_t)1 << 62) d[i] += (uint64_t)1 << 36; keys[i] = c[i] | (d[i] >> 28); uint64_t tmp = 0; for (int j = 0; j < 48; j++) { if (keys[i] & ((uint64_t)1 << (63 - (PC2[j] - 1)))) tmp += ((uint64_t)1 << 63 - j); } keys[i] = tmp; } if (reverse) { for (int i = 1; i <= 8; i++) { uint64_t tmp = keys[i]; keys[i] = keys[17 - i]; keys[17 - i] = tmp; } } } /* * Prints all v bits from start to end given by arguments */ __device__ __host__ void printbits(uint64_t v, int start = 0, int end = 64) { for (int ii = start; ii < end; ii++) { if (((v << ii) & FIRSTBIT) == (uint64_t)0) printf("0"); else printf("1"); } printf("\n"); } /* * Permutates block using initial permutation matrix or final permutation matrix (determined by the second argument) */ __device__ __host__ uint64_t permutate_block(uint64_t block, bool initial, const int InitialPermutation[], const int FinalPermutation[]) { uint64_t permutation = 0; for (int i = 0; i < 64; i++) { if (initial) { if (block & ((uint64_t)1 << (63 - (InitialPermutation[i] - 1)))) permutation += ((uint64_t)1 << 63 - i); } else if (block & ((uint64_t)1 << (63 - (FinalPermutation[i] - 1)))) permutation += ((uint64_t)1 << 63 - i); } return permutation; } /* * Expands block using Expansion matrix given by the argument */ __device__ __host__ uint64_t expand(uint64_t val, const int DesExpansion[]) { uint64_t res = 0; for (int i = 0; i < 48; i++) { if (val & ((uint64_t)1 << (63 - (DesExpansion[i] - 1)))) res += ((uint64_t)1 << 63 - i); } return res; } __device__ __host__ uint64_t calculate_sboxes(uint64_t val, const int DesSbox[8][4][16]) { uint64_t mask = 0b1111110000000000000000000000000000000000000000000000000000000000; uint64_t middle_bits = 0b0000000000000000000000000000000000000000000000000000000000011110; uint64_t ret = 0; for (int i = 0; i < 8; i++) { uint64_t current = (val & (mask >> (6 * i))) >> (64 - 6 * (i + 1)); int column = (current & middle_bits) >> 1; int row = ((current & (1 << 5)) >> 4) + (current & 1); uint64_t val = DesSbox[i][row][column]; ret += val << (60 - 4 * i); } return ret; } /* * Calculates block encryption/decryption */ __device__ __host__ uint64_t jechanka(uint64_t permutated, uint64_t keys[], const int PC1[], const int Rotations[], const int PC2[], const int InitialPermutation[], const int FinalPermutation[], const int DesExpansion[], const int Sbox[8][4][16], const int Pbox[]) { uint64_t l[17], r[17]; uint64_t mask = 
0b1111111111111111111111111111111100000000000000000000000000000000; l[0] = permutated & mask; r[0] = (permutated << 32) & mask; for (int i = 1; i <= 16; i++) { l[i] = r[i - 1]; uint64_t v = calculate_sboxes(keys[i] ^ expand(r[i - 1], DesExpansion), Sbox); uint64_t res = 0; for (int j = 0; j < 32; j++) { if (v & ((uint64_t)1 << (63 - (Pbox[j] - 1)))) res += ((uint64_t)1 << 63 - j); } r[i] = l[i - 1] ^ res; } return permutate_block(r[16] + (l[16] >> 32), false, InitialPermutation, FinalPermutation); } /* * Main DES function - decrypts or encrypts whole message (length given by MSGLEN constant) */ __device__ __host__ void DES(uint64_t encryptedMessage[], uint64_t decryptedMessage[], uint64_t key, const int PC1[], const int Rotations[], const int PC2[], const int InitialPermutation[], const int FinalPermutation[], const int DesExpansion[], const int Sbox[8][4][16], const int Pbox[], bool encrypt) { uint64_t keys[17]; generate_keys(key, !encrypt, PC1, PC2, Rotations, keys); for (int i = 0; i < MSGLEN; i++) { if (encrypt) encryptedMessage[i] = jechanka(permutate_block(decryptedMessage[i], true, InitialPermutation, FinalPermutation), keys, PC1, Rotations, PC2, InitialPermutation, FinalPermutation, DesExpansion, Sbox, Pbox); else decryptedMessage[i] = jechanka(permutate_block(encryptedMessage[i], true, InitialPermutation, FinalPermutation), keys, PC1, Rotations, PC2, InitialPermutation, FinalPermutation, DesExpansion, Sbox, Pbox); } } __global__ void worker_thread(const uint64_t message[], uint64_t encrypted[], uint64_t decrypted[], int known_zeros) { uint64_t threadId = blockIdx.x * BLOCK_SIZE + threadIdx.x; uint64_t mask = 0b0000000000000000000000000000000000000000000000000000000001111111; uint64_t suffix = ((threadId & (mask << 14)) << 3) | ((threadId & (mask << 7)) << 2) | ((threadId & mask) << 1); uint64_t current_key = 0; uint64_t max = (uint64_t)1 << (35 - (known_zeros - known_zeros / 8)); uint64_t current_message[MSGLEN]; bool go; for (uint64_t i = 0; i < max && work == 1; i++) { current_key = (((i & (mask << 28)) << 5) | (((i & (mask << 21)) << 4) | ((i & (mask << 14)) << 3) | ((i & (mask << 7)) << 2) | ((i & mask) << 1)) << 24) | suffix; DES(encrypted, current_message, current_key, d_PC1, d_Rotations, d_PC2, d_InitialPermutation, d_FinalPermutation, d_DesExpansion, d_DesSbox, d_Pbox, false); go = true; for (int j = 0; j < MSGLEN; j++) { if (current_message[j] != message[j]) { go = false; break; } } if (go) { for (int j = 0; j < MSGLEN; j++) { decrypted[j] = current_message[j]; } work = 0; } } } /* * Decrypts message using brute-force method and CUDA */ hipError_t CudaDES(uint64_t plaintext[], uint64_t encrypted[], uint64_t decrypted[], uint64_t key) { hipError_t cudaStatus; uint64_t *d_plain, *d_enc, *d_dec; cudaStatus = hipMalloc(&d_plain, MSGLEN * sizeof(uint64_t)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_enc, MSGLEN * sizeof(uint64_t)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_dec, MSGLEN * sizeof(uint64_t)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMemcpy(d_plain, plaintext, MSGLEN * sizeof(uint64_t), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(d_enc, encrypted, MSGLEN * sizeof(uint64_t), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } 
worker_thread << < BLOCKS, BLOCK_SIZE >> > (d_plain, d_enc, d_dec, KNOWN_ZEROS); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "worker thread failed!"); goto Error; } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize failed!"); goto Error; } cudaStatus = hipMemcpy(decrypted, d_dec, MSGLEN * sizeof(uint64_t), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(d_plain); hipFree(d_enc); hipFree(d_dec); return cudaStatus; } /* * Prints message in binary format */ void printmsg(uint64_t msg[]) { for (int i = 0; i < MSGLEN; i++) printbits(msg[i]); } /* * Compares original message with message decrypted with CUDA/CPU */ bool proper_decipher(uint64_t msg[], uint64_t decrypted[]) { for (int i = 0; i < MSGLEN; i++) { if (msg[i] != decrypted[i]) { return false; } } return true; } void cpuDES(uint64_t plaintext[], uint64_t encrypted[], uint64_t decrypted[], uint64_t key, int known_zeros) { int cpu_work = 1; for (uint64_t threadId = 0; threadId < (uint64_t)2 << 21 && cpu_work == 1; threadId++) { uint64_t mask = 0b0000000000000000000000000000000000000000000000000000000001111111; uint64_t suffix = ((threadId & (mask << 14)) << 3) | ((threadId & (mask << 7)) << 2) | ((threadId & mask) << 1); uint64_t current_key = 0; uint64_t max = (uint64_t)1 << (35 - (known_zeros - known_zeros / 8)); uint64_t current_message[MSGLEN]; bool go; for (uint64_t i = 0; i <= max && cpu_work == 1; i++) { current_key = (((i & (mask << 28)) << 5) | (((i & (mask << 21)) << 4) | ((i & (mask << 14)) << 3) | ((i & (mask << 7)) << 2) | ((i & mask) << 1)) << 24) | suffix; DES(encrypted, current_message, current_key, PC1, Rotations, PC2, InitialPermutation, FinalPermutation, DesExpansion, DesSbox, Pbox, false); go = true; for (int j = 0; j < MSGLEN; j++) { if (current_message[j] != plaintext[j]) { go = false; break; } } if (go) { for (int j = 0; j < MSGLEN; j++) { decrypted[j] = current_message[j]; } cpu_work = 0; } } } } int main() { uint64_t key = 0b0000000000000000000000000000000000001111111111111111111111111111; uint64_t msg[1] = { 0b00000000000100100011010001010110011110001001101010111100110111101111 }; uint64_t decrypted[1]; uint64_t encrypted[1]; bool success; printf("Plain text:\n"); printmsg(msg); DES(encrypted, msg, key, PC1, Rotations, PC2, InitialPermutation, FinalPermutation, DesExpansion, DesSbox, Pbox, true); printf("Encrypted:\n"); printmsg(encrypted); clock_t begin, end; double elapsed_secs; /* --------------------------------------- CUDA ----------------------------------------------------- */ begin = clock(); printf("Starting GPU DES cracking for %d known leading zeros...\n", KNOWN_ZEROS); hipError_t cudaStatus = CudaDES(msg, encrypted, decrypted, key); end = clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; printf("Finished GPU DES cracking for known leading zeros count = %d.\nTime elapsed: %fs.\n", KNOWN_ZEROS, elapsed_secs); if (cudaStatus != hipSuccess) { printf("Cos sie, cos sie popsulo...\n"); } else { printf("Decrypted:\n"); printmsg(decrypted); success = proper_decipher(msg, decrypted); printf(success ? 
"SUCCESS\n" : "FAILURE\n"); } /* --------------------------------------- CPU ----------------------------------------------------- */ /*begin = clock(); printf("Starting CPU DES cracking for %d known leading zeros...\n", KNOWN_ZEROS); cpuDES(msg, encrypted, decrypted, key, KNOWN_ZEROS); end = clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; printf("Finished CPU DES cracking for known leading zeros count = %d.\nTime elapsed: %fs.\n", KNOWN_ZEROS, elapsed_secs); printf("Decrypted:\n"); printmsg(decrypted); success = proper_decipher(msg, decrypted); printf(success ? "SUCCESS\n" : "FAILURE\n");*/ /* --------------------------------------- DEBUG ----------------------------------------------------- */ DES(encrypted, decrypted, key, PC1, Rotations, PC2, InitialPermutation, FinalPermutation, DesExpansion, DesSbox, Pbox, false); printf("Decrypted with proper key:\n"); printmsg(decrypted); success = proper_decipher(msg, decrypted); printf(success ? "SUCCESS\n" : "FAILURE\n"); printf("Press any key to exit program\n"); getchar(); return 0; }
e9294ef0cfb2f212b3d0dcd5f3c62b70db053538.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cstdint> #include <ctime> #define FIRSTBIT 0x8000000000000000 #define BLOCK_SIZE 1024 #define BLOCKS 2048 #define KNOWN_ZEROS 36 #define MSGLEN 1 __device__ int work = 1; // Host matrices: const int PC1[56] = { 57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36, 63, 55, 47, 39, 31, 23, 15, 7, 62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37, 29, 21, 13, 5, 28, 20, 12, 4 }; const int Rotations[16] = { 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1 }; const int PC2[48] = { 14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32 }; const int InitialPermutation[64] = { 58, 50, 42, 34, 26, 18, 10, 2, 60, 52, 44, 36, 28, 20, 12, 4, 62, 54, 46, 38, 30, 22, 14, 6, 64, 56, 48, 40, 32, 24, 16, 8, 57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3, 61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7 }; const int FinalPermutation[64] = { 40, 8, 48, 16, 56, 24, 64, 32, 39, 7, 47, 15, 55, 23, 63, 31, 38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29, 36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27, 34, 2, 42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25 }; const int DesExpansion[48] = { 32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9, 8, 9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17, 16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25, 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1 }; const int DesSbox[8][4][16] = { { { 14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7 }, { 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8 }, { 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0 }, { 15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13 }, }, { { 15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10 }, { 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5 }, { 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15 }, { 13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9 }, }, { { 10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8 }, { 13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1 }, { 13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7 }, { 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12 }, }, { { 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15 }, { 13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9 }, { 10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4 }, { 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14 }, }, { { 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9 }, { 14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6 }, { 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14 }, { 11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3 }, }, { { 12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11 }, { 10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8 }, { 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6 }, { 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13 }, }, { { 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1 }, { 13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6 }, { 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2 }, { 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12 }, }, { { 13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7 }, { 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2 }, { 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8 }, { 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11 }, }, }; const int Pbox[32] = { 16, 7, 20, 
21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25 }; // Device matrices: __constant__ int d_PC1[56] = { 57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36, 63, 55, 47, 39, 31, 23, 15, 7, 62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37, 29, 21, 13, 5, 28, 20, 12, 4 }; __constant__ int d_Rotations[16] = { 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1 }; __constant__ int d_PC2[48] = { 14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32 }; __constant__ int d_InitialPermutation[64] = { 58, 50, 42, 34, 26, 18, 10, 2, 60, 52, 44, 36, 28, 20, 12, 4, 62, 54, 46, 38, 30, 22, 14, 6, 64, 56, 48, 40, 32, 24, 16, 8, 57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3, 61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7 }; __constant__ int d_FinalPermutation[64] = { 40, 8, 48, 16, 56, 24, 64, 32, 39, 7, 47, 15, 55, 23, 63, 31, 38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29, 36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27, 34, 2, 42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25 }; __constant__ int d_DesExpansion[48] = { 32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9, 8, 9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17, 16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25, 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1 }; __constant__ int d_DesSbox[8][4][16] = { { { 14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7 }, { 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8 }, { 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0 }, { 15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13 }, }, { { 15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10 }, { 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5 }, { 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15 }, { 13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9 }, }, { { 10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8 }, { 13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1 }, { 13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7 }, { 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12 }, }, { { 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15 }, { 13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9 }, { 10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4 }, { 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14 }, }, { { 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9 }, { 14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6 }, { 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14 }, { 11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3 }, }, { { 12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11 }, { 10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8 }, { 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6 }, { 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13 }, }, { { 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1 }, { 13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6 }, { 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2 }, { 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12 }, }, { { 13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7 }, { 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2 }, { 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8 }, { 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11 }, }, }; __constant__ int d_Pbox[32] = { 16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 
19, 13, 30, 6, 22, 11, 4, 25 }; /* * Generates 16 keys (saved in generating order when encrypting or reverse order when decrypting) and saves them into keys[] array */ __device__ __host__ void generate_keys(uint64_t basekey, bool reverse, const int PC1[], const int PC2[], const int Rotations[], uint64_t keys[]) { uint64_t first = 0; for (int i = 0; i < 56; i++) { if (basekey & ((uint64_t)1 << (63 - (PC1[i] - 1)))) first += ((uint64_t)1 << 63 - i); } uint64_t d[17]; uint64_t c[17]; const uint64_t mask = 0b0000000000000000000000000000111111111111111111111111111100000000; d[0] = (first & mask) << 28; //right half c[0] = ((first >> 28) & mask) << 28; //left half for (int i = 1; i <= 16; i++) { int shifts = Rotations[i - 1]; c[i] = c[i - 1] << shifts; d[i] = d[i - 1] << shifts; if (c[i - 1] & (uint64_t)1 << 63) c[i] += (uint64_t)1 << 35 + shifts; if (shifts == 2) if (c[i - 1] & (uint64_t)1 << 62) c[i] += (uint64_t)1 << 36; if (d[i - 1] & (uint64_t)1 << 63) d[i] += (uint64_t)1 << 35 + shifts; if (shifts == 2) if (d[i - 1] & (uint64_t)1 << 62) d[i] += (uint64_t)1 << 36; keys[i] = c[i] | (d[i] >> 28); uint64_t tmp = 0; for (int j = 0; j < 48; j++) { if (keys[i] & ((uint64_t)1 << (63 - (PC2[j] - 1)))) tmp += ((uint64_t)1 << 63 - j); } keys[i] = tmp; } if (reverse) { for (int i = 1; i <= 8; i++) { uint64_t tmp = keys[i]; keys[i] = keys[17 - i]; keys[17 - i] = tmp; } } } /* * Prints all v bits from start to end given by arguments */ __device__ __host__ void printbits(uint64_t v, int start = 0, int end = 64) { for (int ii = start; ii < end; ii++) { if (((v << ii) & FIRSTBIT) == (uint64_t)0) printf("0"); else printf("1"); } printf("\n"); } /* * Permutates block using initial permutation matrix or final permutation matrix (determined by the second argument) */ __device__ __host__ uint64_t permutate_block(uint64_t block, bool initial, const int InitialPermutation[], const int FinalPermutation[]) { uint64_t permutation = 0; for (int i = 0; i < 64; i++) { if (initial) { if (block & ((uint64_t)1 << (63 - (InitialPermutation[i] - 1)))) permutation += ((uint64_t)1 << 63 - i); } else if (block & ((uint64_t)1 << (63 - (FinalPermutation[i] - 1)))) permutation += ((uint64_t)1 << 63 - i); } return permutation; } /* * Expands block using Expansion matrix given by the argument */ __device__ __host__ uint64_t expand(uint64_t val, const int DesExpansion[]) { uint64_t res = 0; for (int i = 0; i < 48; i++) { if (val & ((uint64_t)1 << (63 - (DesExpansion[i] - 1)))) res += ((uint64_t)1 << 63 - i); } return res; } __device__ __host__ uint64_t calculate_sboxes(uint64_t val, const int DesSbox[8][4][16]) { uint64_t mask = 0b1111110000000000000000000000000000000000000000000000000000000000; uint64_t middle_bits = 0b0000000000000000000000000000000000000000000000000000000000011110; uint64_t ret = 0; for (int i = 0; i < 8; i++) { uint64_t current = (val & (mask >> (6 * i))) >> (64 - 6 * (i + 1)); int column = (current & middle_bits) >> 1; int row = ((current & (1 << 5)) >> 4) + (current & 1); uint64_t val = DesSbox[i][row][column]; ret += val << (60 - 4 * i); } return ret; } /* * Calculates block encryption/decryption */ __device__ __host__ uint64_t jechanka(uint64_t permutated, uint64_t keys[], const int PC1[], const int Rotations[], const int PC2[], const int InitialPermutation[], const int FinalPermutation[], const int DesExpansion[], const int Sbox[8][4][16], const int Pbox[]) { uint64_t l[17], r[17]; uint64_t mask = 0b1111111111111111111111111111111100000000000000000000000000000000; l[0] = permutated & mask; r[0] = 
(permutated << 32) & mask; for (int i = 1; i <= 16; i++) { l[i] = r[i - 1]; uint64_t v = calculate_sboxes(keys[i] ^ expand(r[i - 1], DesExpansion), Sbox); uint64_t res = 0; for (int j = 0; j < 32; j++) { if (v & ((uint64_t)1 << (63 - (Pbox[j] - 1)))) res += ((uint64_t)1 << 63 - j); } r[i] = l[i - 1] ^ res; } return permutate_block(r[16] + (l[16] >> 32), false, InitialPermutation, FinalPermutation); } /* * Main DES function - decrypts or encrypts whole message (length given by MSGLEN constant) */ __device__ __host__ void DES(uint64_t encryptedMessage[], uint64_t decryptedMessage[], uint64_t key, const int PC1[], const int Rotations[], const int PC2[], const int InitialPermutation[], const int FinalPermutation[], const int DesExpansion[], const int Sbox[8][4][16], const int Pbox[], bool encrypt) { uint64_t keys[17]; generate_keys(key, !encrypt, PC1, PC2, Rotations, keys); for (int i = 0; i < MSGLEN; i++) { if (encrypt) encryptedMessage[i] = jechanka(permutate_block(decryptedMessage[i], true, InitialPermutation, FinalPermutation), keys, PC1, Rotations, PC2, InitialPermutation, FinalPermutation, DesExpansion, Sbox, Pbox); else decryptedMessage[i] = jechanka(permutate_block(encryptedMessage[i], true, InitialPermutation, FinalPermutation), keys, PC1, Rotations, PC2, InitialPermutation, FinalPermutation, DesExpansion, Sbox, Pbox); } } __global__ void worker_thread(const uint64_t message[], uint64_t encrypted[], uint64_t decrypted[], int known_zeros) { uint64_t threadId = blockIdx.x * BLOCK_SIZE + threadIdx.x; uint64_t mask = 0b0000000000000000000000000000000000000000000000000000000001111111; uint64_t suffix = ((threadId & (mask << 14)) << 3) | ((threadId & (mask << 7)) << 2) | ((threadId & mask) << 1); uint64_t current_key = 0; uint64_t max = (uint64_t)1 << (35 - (known_zeros - known_zeros / 8)); uint64_t current_message[MSGLEN]; bool go; for (uint64_t i = 0; i < max && work == 1; i++) { current_key = (((i & (mask << 28)) << 5) | (((i & (mask << 21)) << 4) | ((i & (mask << 14)) << 3) | ((i & (mask << 7)) << 2) | ((i & mask) << 1)) << 24) | suffix; DES(encrypted, current_message, current_key, d_PC1, d_Rotations, d_PC2, d_InitialPermutation, d_FinalPermutation, d_DesExpansion, d_DesSbox, d_Pbox, false); go = true; for (int j = 0; j < MSGLEN; j++) { if (current_message[j] != message[j]) { go = false; break; } } if (go) { for (int j = 0; j < MSGLEN; j++) { decrypted[j] = current_message[j]; } work = 0; } } } /* * Decrypts message using brute-force method and CUDA */ cudaError_t CudaDES(uint64_t plaintext[], uint64_t encrypted[], uint64_t decrypted[], uint64_t key) { cudaError_t cudaStatus; uint64_t *d_plain, *d_enc, *d_dec; cudaStatus = cudaMalloc(&d_plain, MSGLEN * sizeof(uint64_t)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_enc, MSGLEN * sizeof(uint64_t)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_dec, MSGLEN * sizeof(uint64_t)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMemcpy(d_plain, plaintext, MSGLEN * sizeof(uint64_t), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(d_enc, encrypted, MSGLEN * sizeof(uint64_t), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } worker_thread << < BLOCKS, BLOCK_SIZE >> > (d_plain, d_enc, d_dec, KNOWN_ZEROS); cudaStatus 
= cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "worker thread failed!"); goto Error; } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize failed!"); goto Error; } cudaStatus = cudaMemcpy(decrypted, d_dec, MSGLEN * sizeof(uint64_t), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(d_plain); cudaFree(d_enc); cudaFree(d_dec); return cudaStatus; } /* * Prints message in binary format */ void printmsg(uint64_t msg[]) { for (int i = 0; i < MSGLEN; i++) printbits(msg[i]); } /* * Compares original message with message decrypted with CUDA/CPU */ bool proper_decipher(uint64_t msg[], uint64_t decrypted[]) { for (int i = 0; i < MSGLEN; i++) { if (msg[i] != decrypted[i]) { return false; } } return true; } void cpuDES(uint64_t plaintext[], uint64_t encrypted[], uint64_t decrypted[], uint64_t key, int known_zeros) { int cpu_work = 1; for (uint64_t threadId = 0; threadId < (uint64_t)2 << 21 && cpu_work == 1; threadId++) { uint64_t mask = 0b0000000000000000000000000000000000000000000000000000000001111111; uint64_t suffix = ((threadId & (mask << 14)) << 3) | ((threadId & (mask << 7)) << 2) | ((threadId & mask) << 1); uint64_t current_key = 0; uint64_t max = (uint64_t)1 << (35 - (known_zeros - known_zeros / 8)); uint64_t current_message[MSGLEN]; bool go; for (uint64_t i = 0; i <= max && cpu_work == 1; i++) { current_key = (((i & (mask << 28)) << 5) | (((i & (mask << 21)) << 4) | ((i & (mask << 14)) << 3) | ((i & (mask << 7)) << 2) | ((i & mask) << 1)) << 24) | suffix; DES(encrypted, current_message, current_key, PC1, Rotations, PC2, InitialPermutation, FinalPermutation, DesExpansion, DesSbox, Pbox, false); go = true; for (int j = 0; j < MSGLEN; j++) { if (current_message[j] != plaintext[j]) { go = false; break; } } if (go) { for (int j = 0; j < MSGLEN; j++) { decrypted[j] = current_message[j]; } cpu_work = 0; } } } } int main() { uint64_t key = 0b0000000000000000000000000000000000001111111111111111111111111111; uint64_t msg[1] = { 0b00000000000100100011010001010110011110001001101010111100110111101111 }; uint64_t decrypted[1]; uint64_t encrypted[1]; bool success; printf("Plain text:\n"); printmsg(msg); DES(encrypted, msg, key, PC1, Rotations, PC2, InitialPermutation, FinalPermutation, DesExpansion, DesSbox, Pbox, true); printf("Encrypted:\n"); printmsg(encrypted); clock_t begin, end; double elapsed_secs; /* --------------------------------------- CUDA ----------------------------------------------------- */ begin = clock(); printf("Starting GPU DES cracking for %d known leading zeros...\n", KNOWN_ZEROS); cudaError_t cudaStatus = CudaDES(msg, encrypted, decrypted, key); end = clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; printf("Finished GPU DES cracking for known leading zeros count = %d.\nTime elapsed: %fs.\n", KNOWN_ZEROS, elapsed_secs); if (cudaStatus != cudaSuccess) { printf("Cos sie, cos sie popsulo...\n"); } else { printf("Decrypted:\n"); printmsg(decrypted); success = proper_decipher(msg, decrypted); printf(success ? 
"SUCCESS\n" : "FAILURE\n"); } /* --------------------------------------- CPU ----------------------------------------------------- */ /*begin = clock(); printf("Starting CPU DES cracking for %d known leading zeros...\n", KNOWN_ZEROS); cpuDES(msg, encrypted, decrypted, key, KNOWN_ZEROS); end = clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; printf("Finished CPU DES cracking for known leading zeros count = %d.\nTime elapsed: %fs.\n", KNOWN_ZEROS, elapsed_secs); printf("Decrypted:\n"); printmsg(decrypted); success = proper_decipher(msg, decrypted); printf(success ? "SUCCESS\n" : "FAILURE\n");*/ /* --------------------------------------- DEBUG ----------------------------------------------------- */ DES(encrypted, decrypted, key, PC1, Rotations, PC2, InitialPermutation, FinalPermutation, DesExpansion, DesSbox, Pbox, false); printf("Decrypted with proper key:\n"); printmsg(decrypted); success = proper_decipher(msg, decrypted); printf(success ? "SUCCESS\n" : "FAILURE\n"); printf("Press any key to exit program\n"); getchar(); return 0; }
fca2b2f18e1155731d596756e7c897fdb4fe7692.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #define BLOCK_SIZE 16 typedef struct { int width; int height; float* elements; } Matrix; __global__ void MatrixMultKern(const Matrix A, const Matrix B, const Matrix C) { // Calculate the column index of C and B int col = blockIdx.x * blockDim.x + threadIdx.x; // Calculate the row index of C and of A int row = blockIdx.y * blockDim.y + threadIdx.y; if ((row < A.height) && (col < B.width)) { float Cvalue = 0; // each thread computes one element of the block sub-matrix for (int k = 0; k < A.width; ++k) { Cvalue += A.elements[row * A.width + k] * B.elements[k * B.width + col]; } C.elements[row * C.width + col] = Cvalue; } } //Matrix multiplication - Host Code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatrixMult(const Matrix h_A, const Matrix h_B, Matrix h_C) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Load A and B into device memory Matrix d_A; d_A.width = h_A.width; d_A.height = h_A.height; size_t size = h_A.width * h_A.height * sizeof(float); hipError_t err = hipMalloc(&d_A.elements, size); printf("CUDA malloc h_A: %s\n", hipGetErrorString(err)); hipMemcpy(d_A.elements, h_A.elements, size, hipMemcpyHostToDevice); Matrix d_B; d_B.width = h_B.width; d_B.height = h_B.height; size = h_B.width * h_B.height * sizeof(float); err = hipMalloc(&d_B.elements, size); printf("CUDA malloc h_B: %s\n", hipGetErrorString(err)); hipMemcpy(d_B.elements, h_B.elements, size, hipMemcpyHostToDevice); // Allocate C in Device memory Matrix d_C; d_C.width = h_C.width; d_C.height = h_C.height; size = h_C.width * h_C.height * sizeof(float); err = hipMalloc(&d_C.elements, size); printf("CUDA malloc h_C: %s\n", hipGetErrorString(err)); // Invoke Kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(d_B.width / dimBlock.x, d_A.height / dimBlock.y); hipEventRecord(start); MatrixMultKern << < dimGrid, dimBlock >> > (d_A, d_B, d_C); err = hipDeviceSynchronize(); hipEventRecord(stop); printf("Run kernel: %s\n", hipGetErrorString(err)); // Read C from Device to Host err = hipMemcpy(h_C.elements, d_C.elements, size, hipMemcpyDeviceToHost); printf("Copy h_C off device: %s\n", hipGetErrorString(err)); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Matrix 1: Elapsed time was: %i %f\n milliseconds", h_A.width, milliseconds); // Free Device Memory hipFree(d_A.elements); hipFree(d_B.elements); hipFree(d_C.elements); } int pow(int y) { int test = 16; for (int i = 0; i < (y-1); i++) { test = test * 2; } return test; } int main(int argc, char* argv[]) { Matrix a, b, c; int size = 8192; // read dimensions of a and b a.height = size; a.width = size; b.height = a.width; b.width = size; a.elements = (float*)malloc(a.width * a.height * sizeof(float)); b.elements = (float*)malloc(b.width * b.height * sizeof(float)); c.height = a.height; c.width = b.width; c.elements = (float*)malloc(c.width * c.height * sizeof(float)); for (int i = 0; i < a.height; i++) for (int j = 0; j < a.width; j++) a.elements[i * a.width + j] = (float)(rand() % 3); for (int i = 0; i < b.height; i++) for (int j = 0; j < b.width; j++) b.elements[i * b.width + j] = (float)(rand() % 2); MatrixMult(a, b, c); //for (int i = 0; i < a.height; i++) { // for (int j = 0; j < a.width; j++) // printf("%f ", a.elements[i * a.width + j]); // printf("\n"); //} //printf("\n"); //for (int i = 0; i < b.height; i++) { // for 
(int j = 0; j < b.width; j++) // printf("%f ", b.elements[i * b.width + j]); // printf("\n"); //} //printf("\n"); //for (int i = 0; i < c.height; i++) { // for (int j = 0; j < c.width; j++) // printf("%f ", c.elements[i * c.width + j]); // printf("\n"); //} //printf("\n"); return 0; }
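A note on the launch configuration in the hipified MatrixMult above: dimGrid is computed as d_B.width / dimBlock.x by d_A.height / dimBlock.y, which drops any remainder when the dimensions are not multiples of BLOCK_SIZE (the comment states that assumption), and the timing printf places "milliseconds" after the \n. The sketch below shows ceil-division grid sizing with a hypothetical helper name; it is valid in both the HIP and CUDA builds, and the kernel's existing row/col bounds check then covers the ragged edge.

// Minimal sketch (the helper name is hypothetical): round the grid up so a
// partial tile at the right/bottom edge still gets a block; the kernel's
// row < A.height && col < B.width guard masks the surplus threads.
static inline dim3 gridForOutput(int outWidth, int outHeight, dim3 block) {
    return dim3((outWidth + block.x - 1) / block.x,
                (outHeight + block.y - 1) / block.y);
}
// Usage inside MatrixMult: dim3 dimGrid = gridForOutput(d_B.width, d_A.height, dimBlock);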
fca2b2f18e1155731d596756e7c897fdb4fe7692.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #define BLOCK_SIZE 16 typedef struct { int width; int height; float* elements; } Matrix; __global__ void MatrixMultKern(const Matrix A, const Matrix B, const Matrix C) { // Calculate the column index of C and B int col = blockIdx.x * blockDim.x + threadIdx.x; // Calculate the row index of C and of A int row = blockIdx.y * blockDim.y + threadIdx.y; if ((row < A.height) && (col < B.width)) { float Cvalue = 0; // each thread computes one element of the block sub-matrix for (int k = 0; k < A.width; ++k) { Cvalue += A.elements[row * A.width + k] * B.elements[k * B.width + col]; } C.elements[row * C.width + col] = Cvalue; } } //Matrix multiplication - Host Code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatrixMult(const Matrix h_A, const Matrix h_B, Matrix h_C) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Load A and B into device memory Matrix d_A; d_A.width = h_A.width; d_A.height = h_A.height; size_t size = h_A.width * h_A.height * sizeof(float); cudaError_t err = cudaMalloc(&d_A.elements, size); printf("CUDA malloc h_A: %s\n", cudaGetErrorString(err)); cudaMemcpy(d_A.elements, h_A.elements, size, cudaMemcpyHostToDevice); Matrix d_B; d_B.width = h_B.width; d_B.height = h_B.height; size = h_B.width * h_B.height * sizeof(float); err = cudaMalloc(&d_B.elements, size); printf("CUDA malloc h_B: %s\n", cudaGetErrorString(err)); cudaMemcpy(d_B.elements, h_B.elements, size, cudaMemcpyHostToDevice); // Allocate C in Device memory Matrix d_C; d_C.width = h_C.width; d_C.height = h_C.height; size = h_C.width * h_C.height * sizeof(float); err = cudaMalloc(&d_C.elements, size); printf("CUDA malloc h_C: %s\n", cudaGetErrorString(err)); // Invoke Kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(d_B.width / dimBlock.x, d_A.height / dimBlock.y); cudaEventRecord(start); MatrixMultKern << < dimGrid, dimBlock >> > (d_A, d_B, d_C); err = cudaThreadSynchronize(); cudaEventRecord(stop); printf("Run kernel: %s\n", cudaGetErrorString(err)); // Read C from Device to Host err = cudaMemcpy(h_C.elements, d_C.elements, size, cudaMemcpyDeviceToHost); printf("Copy h_C off device: %s\n", cudaGetErrorString(err)); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Matrix 1: Elapsed time was: %i %f\n milliseconds", h_A.width, milliseconds); // Free Device Memory cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_C.elements); } int pow(int y) { int test = 16; for (int i = 0; i < (y-1); i++) { test = test * 2; } return test; } int main(int argc, char* argv[]) { Matrix a, b, c; int size = 8192; // read dimensions of a and b a.height = size; a.width = size; b.height = a.width; b.width = size; a.elements = (float*)malloc(a.width * a.height * sizeof(float)); b.elements = (float*)malloc(b.width * b.height * sizeof(float)); c.height = a.height; c.width = b.width; c.elements = (float*)malloc(c.width * c.height * sizeof(float)); for (int i = 0; i < a.height; i++) for (int j = 0; j < a.width; j++) a.elements[i * a.width + j] = (float)(rand() % 3); for (int i = 0; i < b.height; i++) for (int j = 0; j < b.width; j++) b.elements[i * b.width + j] = (float)(rand() % 2); MatrixMult(a, b, c); //for (int i = 0; i < a.height; i++) { // for (int j = 0; j < a.width; j++) // printf("%f ", a.elements[i * a.width + j]); // printf("\n"); //} //printf("\n"); //for (int i = 0; i < b.height; i++) { // for (int j = 0; j < b.width; j++) // 
printf("%f ", b.elements[i * b.width + j]); // printf("\n"); //} //printf("\n"); //for (int i = 0; i < c.height; i++) { // for (int j = 0; j < c.width; j++) // printf("%f ", c.elements[i * c.width + j]); // printf("\n"); //} //printf("\n"); return 0; }
3438aa2a40ef608f51d372f7779bbff1b722a9f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ABserial.c * * Created on: Nov 11, 2014 * Author: nathan */ #include "ab_pt3.h" extern "C" { #include "boundBox.h" } extern "C" { #include "computeAuxiliaryGrid_pt3.h" } extern "C" { #include "compactAuxiliaryGrid_pt3.h" } //#include "writecell.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/resource.h> #include <sys/times.h> #include <sys/time.h> #include "cuda_utils.h" #include "timer.h" #include <time.h> __global__ void wd_ab_parallel_pt3_t5(double *cellCenters, double *faceCenters, double *box, struct cell_pt3 *compAuxCells, int size_c, int size_f, int numAuxCells, double auxDiag, double *wallDist){ extern __shared__ double s_box []; int myId, includesAuxCells, j, index; double r, rtemp, rcurrent, rAux, c_x, c_y; myId = threadIdx.x + blockDim.x * blockIdx.x; // Keep array access bounded if (myId >= size_c){ return; } // Pull box pts into shared memory if (threadIdx.x < 16){ s_box[threadIdx.x] = box[threadIdx.x]; } c_x = cellCenters[2*myId]; c_y = cellCenters[2*myId+1]; // Compute initial radius r=1e9; for (j=0; j<8; j++){ rtemp = sqrt( pow((c_x-s_box[2*j]),2) + pow((c_y-s_box[2*j+1]),2) ); if (rtemp<r){ r=rtemp; } } // Loop through compacted auxCell array to see if any lie within rc includesAuxCells = 0; while(includesAuxCells == 0){ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-compAuxCells[j].xcenter,2) + pow(c_y-compAuxCells[j].ycenter,2) ); // Increase r to be sure enough geometry is included if(rAux < r){ r += auxDiag*0.5; includesAuxCells=1; break; } else{ r += auxDiag; } } } /* * Loop through compacted auxCell array. For those that lie within r, * traverse through faces, compute wallDist and check for minimum */ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-compAuxCells[j].xcenter,2) + pow(c_y-compAuxCells[j].ycenter,2)); // Check if auxCell is within radius of interest if(rAux < r){ index = 0; // Loop through faces and compute distance from grid cell center while(index < compAuxCells[j].numFaces){ rtemp = sqrt( pow(c_x-compAuxCells[j].faces[2*index],2) + pow(c_y-compAuxCells[j].faces[2*index+1],2)); // If dist is smaller than current wallDist, replace if(rtemp<rcurrent){ // wallDist[myId]=rtemp; rcurrent = rtemp; } index++; } } } // Store wallDistance to global array wallDist[myId] = rcurrent; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////// void ab_parallel_t3(double * xc, double * yc, double * xf, double * yf, int size_c, int size_f, double * wallDist){ double xmin; double xmax; double ymin; double ymax; //////////////////////////////////////////////////////////////////// // Pre-processing //////////////////////////////////////////////////////////////////// // Create geometry bounding box boundBox(xf,yf,size_f,&xmin,&xmax,&ymin,&ymax); // Create auxiliary grid int resI=80; int resJ=80; double auxDiag = sqrt( pow((xmax-xmin)/(double)(resI-1),2) + pow((ymax-ymin)/(double)(resJ-1),2)); int numAuxCells = (resI-1)*(resJ-1); int i, j, cellsWithFaces; struct cell_pt3 *auxCells; // auxCells = (struct cell_pt1 *)malloc(numAuxCells*sizeof(struct cell_pt1)); auxCells = new cell_pt3[numAuxCells]; computeAuxiliaryGrid_pt3(xmin,xmax,ymin,ymax,resI,resJ,auxCells); // Count number of auxiliary cells that contain geometry faces cellsWithFaces = 0; for (i=0; i<numAuxCells; i++){ for (j=0; 
j<size_f; j++){ if (xf[j] < auxCells[i].xmax && xf[j] > auxCells[i].xmin && yf[j] < auxCells[i].ymax && yf[j] > auxCells[i].ymin){ cellsWithFaces++; break; } } } // Allocate memory for compacted cells struct cell_pt3 * compAuxCells; // compAuxCells = (struct cell_pt1 *)malloc(cellsWithFaces*sizeof(struct cell_pt1)); compAuxCells = new cell_pt3[cellsWithFaces]; /////// compactAuxiliaryGrid_pt3(auxCells,numAuxCells,compAuxCells,xf,yf,size_f); /////// // Bounding box point arrays double xmid = (xmax+xmin)/2.0; double ymid = (ymax+ymin)/2.0; double xBoxPts[8] = {xmin, xmid, xmax, xmax, xmax, xmid, xmin, xmin}; double yBoxPts[8] = {ymin, ymin, ymin, ymid, ymax, ymax, ymax, ymid}; //////////////////////////////////////////////////////////////////////////////// // Combine xc,yc arrays for coallesced memory access in parallel t2 version //////////////////////////////////////////////////////////////////////////////// double *cellCenters; cellCenters = new double[2*size_c]; for (i=0; i<size_c; i++){ cellCenters[2*i] = xc[i]; cellCenters[2*i+1] = yc[i]; } double *faceCenters; faceCenters = new double[2*size_f]; for (i=0; i<size_f; i++){ faceCenters[2*i] = xf[i]; faceCenters[2*i+1] = yf[i]; } double *boxPts; boxPts = new double[16]; for (i=0; i<8; i++){ boxPts[2*i] = xBoxPts[i]; boxPts[2*i+1] = yBoxPts[i]; } double *auxCenters; auxCenters = new double[2*cellsWithFaces*sizeof(double)]; for (i=0; i<cellsWithFaces; i++){ auxCenters[2*i] = compAuxCells[i].xcenter; auxCenters[2*i+1] = compAuxCells[i].ycenter; } //////////////////////////////////////////////////////////////////// // Allocate device memory and copy data //////////////////////////////////////////////////////////////////// // bounding box double *d_xbox, *d_ybox, *d_box; checkCudaErrors(hipMalloc(&d_xbox,8*sizeof(double))); checkCudaErrors(hipMalloc(&d_ybox,8*sizeof(double))); checkCudaErrors(hipMalloc(&d_box,16*sizeof(double))); checkCudaErrors(hipMemcpy(d_xbox,xBoxPts,8*sizeof(double),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_ybox,yBoxPts,8*sizeof(double),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_box,boxPts,16*sizeof(double),hipMemcpyHostToDevice)); // grid and faces double *d_xc, *d_yc, *d_xf, *d_yf, *d_cellCenters, *d_faceCenters; checkCudaErrors(hipMalloc(&d_xc,size_c*sizeof(double))); checkCudaErrors(hipMalloc(&d_yc,size_c*sizeof(double))); checkCudaErrors(hipMalloc(&d_xf,size_c*sizeof(double))); checkCudaErrors(hipMalloc(&d_yf,size_c*sizeof(double))); checkCudaErrors(hipMalloc(&d_cellCenters,2*size_c*sizeof(double))); checkCudaErrors(hipMalloc(&d_faceCenters,2*size_f*sizeof(double))); checkCudaErrors(hipMemcpy(d_xc,xc,size_c*sizeof(double),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_yc,yc,size_c*sizeof(double),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_xf,xf,size_c*sizeof(double),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_yf,yf,size_c*sizeof(double),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_cellCenters,cellCenters,2*size_c*sizeof(double),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_faceCenters,faceCenters,2*size_f*sizeof(double),hipMemcpyHostToDevice)); // auxCell structs struct cell_pt3 * d_compAuxCells; checkCudaErrors(hipMalloc((void **)&d_compAuxCells,cellsWithFaces*sizeof(struct cell_pt3))); checkCudaErrors(hipMemcpy(d_compAuxCells,compAuxCells,cellsWithFaces*sizeof(struct cell_pt3),hipMemcpyHostToDevice)); // auxCenter array double *d_auxCenters; checkCudaErrors(hipMalloc(&d_auxCenters,2*cellsWithFaces*sizeof(double))); 
checkCudaErrors(hipMemcpy(d_auxCenters,auxCenters,2*cellsWithFaces*sizeof(double),hipMemcpyHostToDevice)); // wallDist array double *d_wallDist; checkCudaErrors(hipMalloc(&d_wallDist,size_c*sizeof(double))); checkCudaErrors(hipMemcpy(d_wallDist,wallDist,size_c*sizeof(double),hipMemcpyHostToDevice)); //////////////////////////////////////////////////////////////////// // Wall Distance Calc //////////////////////////////////////////////////////////////////// GpuTimer timer; int threadsPerBlock, numBlocks; threadsPerBlock = 512; numBlocks = (size_c/threadsPerBlock)+1; // Reset wallDistance checkCudaErrors(hipMemcpy(d_wallDist,wallDist,size_c*sizeof(double),hipMemcpyHostToDevice)); timer.Start(); hipLaunchKernelGGL(( wd_ab_parallel_pt3_t5), dim3(numBlocks),dim3(threadsPerBlock),16*sizeof(double), 0, d_cellCenters,d_faceCenters,d_box,d_compAuxCells,size_c,size_f,cellsWithFaces,auxDiag,d_wallDist); timer.Stop(); printf("Advancing boundary - parallel pt3_T5(GpuTimer): \t %.0f milliseconds\n",timer.Elapsed()); // Copy wallDist back to host // checkCudaErrors(hipMemcpy(wallDist,d_wallDist,sizeof(double)*size_c,hipMemcpyDeviceToHost)); //////////////////////////////////////////////////////////////////// // //////////////////////////////////////////////////////////////////// }
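Two hazards in wd_ab_parallel_pt3_t5 above are worth flagging: threads 0-15 stage the 16 box coordinates into dynamic shared memory but no __syncthreads() separates that store from the reads by other warps, and rcurrent is compared against before it is ever assigned. The sketch below shows the staging pattern with the barrier in place and an initialized running minimum; it is a hypothetical kernel, not a drop-in replacement, and the out-of-range early return moves below the barrier so every thread in the block reaches it.

// Hypothetical kernel illustrating the corrected shared-memory staging pattern.
__global__ void box_distance_min(const double *box, const double *cellCenters,
                                 double *wallDist, int size_c) {
    extern __shared__ double s_box[];          // 16 doubles, size passed at launch
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    if (threadIdx.x < 16) s_box[threadIdx.x] = box[threadIdx.x];
    __syncthreads();                           // staged values visible to all warps
    if (myId >= size_c) return;                // safe: every thread has passed the barrier
    double c_x = cellCenters[2 * myId];
    double c_y = cellCenters[2 * myId + 1];
    double rmin = 1e300;                       // initialized, unlike rcurrent in the original
    for (int j = 0; j < 8; ++j) {
        double dx = c_x - s_box[2 * j];
        double dy = c_y - s_box[2 * j + 1];
        double r  = sqrt(dx * dx + dy * dy);
        if (r < rmin) rmin = r;
    }
    wallDist[myId] = rmin;
}
// Launch shape mirrors the original: box_distance_min<<<numBlocks, threadsPerBlock, 16 * sizeof(double)>>>(...);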
3438aa2a40ef608f51d372f7779bbff1b722a9f0.cu
/* * ABserial.c * * Created on: Nov 11, 2014 * Author: nathan */ #include "ab_pt3.h" extern "C" { #include "boundBox.h" } extern "C" { #include "computeAuxiliaryGrid_pt3.h" } extern "C" { #include "compactAuxiliaryGrid_pt3.h" } //#include "writecell.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/resource.h> #include <sys/times.h> #include <sys/time.h> #include "cuda_utils.h" #include "timer.h" #include <time.h> __global__ void wd_ab_parallel_pt3_t5(double *cellCenters, double *faceCenters, double *box, struct cell_pt3 *compAuxCells, int size_c, int size_f, int numAuxCells, double auxDiag, double *wallDist){ extern __shared__ double s_box []; int myId, includesAuxCells, j, index; double r, rtemp, rcurrent, rAux, c_x, c_y; myId = threadIdx.x + blockDim.x * blockIdx.x; // Keep array access bounded if (myId >= size_c){ return; } // Pull box pts into shared memory if (threadIdx.x < 16){ s_box[threadIdx.x] = box[threadIdx.x]; } c_x = cellCenters[2*myId]; c_y = cellCenters[2*myId+1]; // Compute initial radius r=1e9; for (j=0; j<8; j++){ rtemp = sqrt( pow((c_x-s_box[2*j]),2) + pow((c_y-s_box[2*j+1]),2) ); if (rtemp<r){ r=rtemp; } } // Loop through compacted auxCell array to see if any lie within rc includesAuxCells = 0; while(includesAuxCells == 0){ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-compAuxCells[j].xcenter,2) + pow(c_y-compAuxCells[j].ycenter,2) ); // Increase r to be sure enough geometry is included if(rAux < r){ r += auxDiag*0.5; includesAuxCells=1; break; } else{ r += auxDiag; } } } /* * Loop through compacted auxCell array. For those that lie within r, * traverse through faces, compute wallDist and check for minimum */ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-compAuxCells[j].xcenter,2) + pow(c_y-compAuxCells[j].ycenter,2)); // Check if auxCell is within radius of interest if(rAux < r){ index = 0; // Loop through faces and compute distance from grid cell center while(index < compAuxCells[j].numFaces){ rtemp = sqrt( pow(c_x-compAuxCells[j].faces[2*index],2) + pow(c_y-compAuxCells[j].faces[2*index+1],2)); // If dist is smaller than current wallDist, replace if(rtemp<rcurrent){ // wallDist[myId]=rtemp; rcurrent = rtemp; } index++; } } } // Store wallDistance to global array wallDist[myId] = rcurrent; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////// void ab_parallel_t3(double * xc, double * yc, double * xf, double * yf, int size_c, int size_f, double * wallDist){ double xmin; double xmax; double ymin; double ymax; //////////////////////////////////////////////////////////////////// // Pre-processing //////////////////////////////////////////////////////////////////// // Create geometry bounding box boundBox(xf,yf,size_f,&xmin,&xmax,&ymin,&ymax); // Create auxiliary grid int resI=80; int resJ=80; double auxDiag = sqrt( pow((xmax-xmin)/(double)(resI-1),2) + pow((ymax-ymin)/(double)(resJ-1),2)); int numAuxCells = (resI-1)*(resJ-1); int i, j, cellsWithFaces; struct cell_pt3 *auxCells; // auxCells = (struct cell_pt1 *)malloc(numAuxCells*sizeof(struct cell_pt1)); auxCells = new cell_pt3[numAuxCells]; computeAuxiliaryGrid_pt3(xmin,xmax,ymin,ymax,resI,resJ,auxCells); // Count number of auxiliary cells that contain geometry faces cellsWithFaces = 0; for (i=0; i<numAuxCells; i++){ for (j=0; j<size_f; j++){ if (xf[j] < auxCells[i].xmax && xf[j] > auxCells[i].xmin && yf[j] < 
auxCells[i].ymax && yf[j] > auxCells[i].ymin){ cellsWithFaces++; break; } } } // Allocate memory for compacted cells struct cell_pt3 * compAuxCells; // compAuxCells = (struct cell_pt1 *)malloc(cellsWithFaces*sizeof(struct cell_pt1)); compAuxCells = new cell_pt3[cellsWithFaces]; /////// compactAuxiliaryGrid_pt3(auxCells,numAuxCells,compAuxCells,xf,yf,size_f); /////// // Bounding box point arrays double xmid = (xmax+xmin)/2.0; double ymid = (ymax+ymin)/2.0; double xBoxPts[8] = {xmin, xmid, xmax, xmax, xmax, xmid, xmin, xmin}; double yBoxPts[8] = {ymin, ymin, ymin, ymid, ymax, ymax, ymax, ymid}; //////////////////////////////////////////////////////////////////////////////// // Combine xc,yc arrays for coallesced memory access in parallel t2 version //////////////////////////////////////////////////////////////////////////////// double *cellCenters; cellCenters = new double[2*size_c]; for (i=0; i<size_c; i++){ cellCenters[2*i] = xc[i]; cellCenters[2*i+1] = yc[i]; } double *faceCenters; faceCenters = new double[2*size_f]; for (i=0; i<size_f; i++){ faceCenters[2*i] = xf[i]; faceCenters[2*i+1] = yf[i]; } double *boxPts; boxPts = new double[16]; for (i=0; i<8; i++){ boxPts[2*i] = xBoxPts[i]; boxPts[2*i+1] = yBoxPts[i]; } double *auxCenters; auxCenters = new double[2*cellsWithFaces*sizeof(double)]; for (i=0; i<cellsWithFaces; i++){ auxCenters[2*i] = compAuxCells[i].xcenter; auxCenters[2*i+1] = compAuxCells[i].ycenter; } //////////////////////////////////////////////////////////////////// // Allocate device memory and copy data //////////////////////////////////////////////////////////////////// // bounding box double *d_xbox, *d_ybox, *d_box; checkCudaErrors(cudaMalloc(&d_xbox,8*sizeof(double))); checkCudaErrors(cudaMalloc(&d_ybox,8*sizeof(double))); checkCudaErrors(cudaMalloc(&d_box,16*sizeof(double))); checkCudaErrors(cudaMemcpy(d_xbox,xBoxPts,8*sizeof(double),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_ybox,yBoxPts,8*sizeof(double),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_box,boxPts,16*sizeof(double),cudaMemcpyHostToDevice)); // grid and faces double *d_xc, *d_yc, *d_xf, *d_yf, *d_cellCenters, *d_faceCenters; checkCudaErrors(cudaMalloc(&d_xc,size_c*sizeof(double))); checkCudaErrors(cudaMalloc(&d_yc,size_c*sizeof(double))); checkCudaErrors(cudaMalloc(&d_xf,size_c*sizeof(double))); checkCudaErrors(cudaMalloc(&d_yf,size_c*sizeof(double))); checkCudaErrors(cudaMalloc(&d_cellCenters,2*size_c*sizeof(double))); checkCudaErrors(cudaMalloc(&d_faceCenters,2*size_f*sizeof(double))); checkCudaErrors(cudaMemcpy(d_xc,xc,size_c*sizeof(double),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_yc,yc,size_c*sizeof(double),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_xf,xf,size_c*sizeof(double),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_yf,yf,size_c*sizeof(double),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_cellCenters,cellCenters,2*size_c*sizeof(double),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_faceCenters,faceCenters,2*size_f*sizeof(double),cudaMemcpyHostToDevice)); // auxCell structs struct cell_pt3 * d_compAuxCells; checkCudaErrors(cudaMalloc((void **)&d_compAuxCells,cellsWithFaces*sizeof(struct cell_pt3))); checkCudaErrors(cudaMemcpy(d_compAuxCells,compAuxCells,cellsWithFaces*sizeof(struct cell_pt3),cudaMemcpyHostToDevice)); // auxCenter array double *d_auxCenters; checkCudaErrors(cudaMalloc(&d_auxCenters,2*cellsWithFaces*sizeof(double))); 
checkCudaErrors(cudaMemcpy(d_auxCenters,auxCenters,2*cellsWithFaces*sizeof(double),cudaMemcpyHostToDevice)); // wallDist array double *d_wallDist; checkCudaErrors(cudaMalloc(&d_wallDist,size_c*sizeof(double))); checkCudaErrors(cudaMemcpy(d_wallDist,wallDist,size_c*sizeof(double),cudaMemcpyHostToDevice)); //////////////////////////////////////////////////////////////////// // Wall Distance Calc //////////////////////////////////////////////////////////////////// GpuTimer timer; int threadsPerBlock, numBlocks; threadsPerBlock = 512; numBlocks = (size_c/threadsPerBlock)+1; // Reset wallDistance checkCudaErrors(cudaMemcpy(d_wallDist,wallDist,size_c*sizeof(double),cudaMemcpyHostToDevice)); timer.Start(); wd_ab_parallel_pt3_t5<<<numBlocks,threadsPerBlock,16*sizeof(double)>>>(d_cellCenters,d_faceCenters,d_box,d_compAuxCells,size_c,size_f,cellsWithFaces,auxDiag,d_wallDist); timer.Stop(); printf("Advancing boundary - parallel pt3_T5(GpuTimer): \t %.0f milliseconds\n",timer.Elapsed()); // Copy wallDist back to host // checkCudaErrors(cudaMemcpy(wallDist,d_wallDist,sizeof(double)*size_c,cudaMemcpyDeviceToHost)); //////////////////////////////////////////////////////////////////// // //////////////////////////////////////////////////////////////////// }
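On the host side of the CUDA variant above, d_xf and d_yf are allocated and copied with size_c elements although they hold the size_f face coordinates, and auxCenters is sized 2*cellsWithFaces*sizeof(double) elements (the sizeof factor over-allocates roughly eightfold). Deriving the byte count from the element type in one place keeps counts and sizes from drifting apart; the helper below is a hypothetical sketch, independent of the checkCudaErrors macro from cuda_utils.h.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Hypothetical helper (not part of cuda_utils.h): allocate a device buffer and
// copy `count` elements from `host`, computing the byte size from the element
// type so the element count cannot silently disagree with the allocation.
template <typename T>
T *makeDeviceCopy(const T *host, size_t count) {
    T *dev = NULL;
    size_t bytes = count * sizeof(T);
    if (cudaMalloc((void **)&dev, bytes) != cudaSuccess ||
        cudaMemcpy(dev, host, bytes, cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "device copy of %zu bytes failed\n", bytes);
        exit(EXIT_FAILURE);
    }
    return dev;
}
// Usage with the names above: double *d_faceCenters = makeDeviceCopy(faceCenters, 2 * (size_t)size_f);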
0fe29f063ec1e6903eddcb4620d163ddee25da0d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <math.h> const int N = 128; __global__ void laplacian(int n, double *da, double *dres) { int tid = threadIdx.x + blockIdx.x*blockDim.x; int x = tid / n; int y = tid - n * x; double x_l, x_r, y_l, y_r; if (tid < n*n) { y_l = y - 1 < 0 ? 0 : da[y - 1 + x*n]; x_l = x - 1 < 0 ? 0 : da[y + (x - 1)*n]; y_r = y + 1 > n - 1 ? 0 : da[y + 1 + x*n]; x_r = x + 1 > n - 1 ? 0 : da[y + (x + 1)*n]; dres[y + x*n] = (x_l + y_l + x_r + y_r) / 4.; if (x == 0 || y == n - 1 || x == n - 1) { dres[y + x*n] = 0; } } __syncthreads(); if (y == 0) dres[x*n] = 1; __syncthreads(); da[y + x*n] = dres[y + x*n]; } void toFile(FILE *f_name, double *res_i, int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { fprintf(f_name, "%f\t", res_i[i*N + j]); } } fprintf(f_name, "\n"); } int main() { double *da; double *dres; hipMalloc(&da, sizeof(double) * N * N); hipMalloc(&dres, sizeof(double) * N * N); FILE *f_name; f_name = fopen("res.txt", "w"); double *ha = (double *) calloc(sizeof(double), N * N); for (int i = 0; i < N; i++) { ha[i*N] = 1; } hipMemcpy(da, ha, sizeof(double) * N * N, hipMemcpyHostToDevice); dim3 dimBlock(1024); dim3 dimGrid(N * N / 1024); for (int k = 0; k < 200; k++) { hipLaunchKernelGGL(( laplacian), dim3(dimGrid), dim3(dimBlock), 0, 0, N, da, dres); hipMemcpy(ha, da, sizeof(double)*N*N, hipMemcpyDeviceToHost); toFile(f_name, ha, N); } return 0; }
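The laplacian kernel above writes dres and then copies it back into da within the same launch; __syncthreads() only orders threads of one block, so a block that reaches the copy-back early can overwrite cells that another block has not yet read. A common fix is to keep the kernel read-only on its input and ping-pong the two buffers on the host between launches. The sketch below is a simplified stand-in (jacobi_step keeps only the four-point average and the y == 0 boundary of the original), not the file's kernel.

#include <cuda_runtime.h>
#include <utility>

// Simplified stand-in for the stencil: reads `in`, writes `out`, never touches `in`.
__global__ void jacobi_step(int n, const double *in, double *out) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= n * n) return;
    int x = tid / n, y = tid - n * x;          // same layout as above: index = y + x*n
    double left  = (y > 0)     ? in[tid - 1] : 0.0;
    double right = (y < n - 1) ? in[tid + 1] : 0.0;
    double down  = (x > 0)     ? in[tid - n] : 0.0;
    double up    = (x < n - 1) ? in[tid + n] : 0.0;
    out[tid] = 0.25 * (left + right + up + down);
    if (y == 0) out[tid] = 1.0;                // fixed boundary column, as in the original
}

// Ping-pong host loop: swapping pointers between launches replaces the
// in-kernel copy-back, so no cross-block ordering is needed within a launch.
double *run_jacobi(double *da, double *dres, int n, int iters, dim3 grid, dim3 block) {
    for (int k = 0; k < iters; ++k) {
        jacobi_step<<<grid, block>>>(n, da, dres);
        std::swap(da, dres);                   // newest field is now behind `da`
    }
    cudaDeviceSynchronize();
    return da;                                 // buffer holding the final iterate
}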
0fe29f063ec1e6903eddcb4620d163ddee25da0d.cu
#include <stdio.h> #include <math.h> const int N = 128; __global__ void laplacian(int n, double *da, double *dres) { int tid = threadIdx.x + blockIdx.x*blockDim.x; int x = tid / n; int y = tid - n * x; double x_l, x_r, y_l, y_r; if (tid < n*n) { y_l = y - 1 < 0 ? 0 : da[y - 1 + x*n]; x_l = x - 1 < 0 ? 0 : da[y + (x - 1)*n]; y_r = y + 1 > n - 1 ? 0 : da[y + 1 + x*n]; x_r = x + 1 > n - 1 ? 0 : da[y + (x + 1)*n]; dres[y + x*n] = (x_l + y_l + x_r + y_r) / 4.; if (x == 0 || y == n - 1 || x == n - 1) { dres[y + x*n] = 0; } } __syncthreads(); if (y == 0) dres[x*n] = 1; __syncthreads(); da[y + x*n] = dres[y + x*n]; } void toFile(FILE *f_name, double *res_i, int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { fprintf(f_name, "%f\t", res_i[i*N + j]); } } fprintf(f_name, "\n"); } int main() { double *da; double *dres; cudaMalloc(&da, sizeof(double) * N * N); cudaMalloc(&dres, sizeof(double) * N * N); FILE *f_name; f_name = fopen("res.txt", "w"); double *ha = (double *) calloc(sizeof(double), N * N); for (int i = 0; i < N; i++) { ha[i*N] = 1; } cudaMemcpy(da, ha, sizeof(double) * N * N, cudaMemcpyHostToDevice); dim3 dimBlock(1024); dim3 dimGrid(N * N / 1024); for (int k = 0; k < 200; k++) { laplacian<<<dimGrid, dimBlock>>>(N, da, dres); cudaMemcpy(ha, da, sizeof(double)*N*N, cudaMemcpyDeviceToHost); toFile(f_name, ha, N); } return 0; }
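In the CUDA variant above, dimGrid = N*N/1024 happens to divide evenly for N = 128 but truncates for other sizes, and the host never calls fclose(f_name), cudaFree, or free(ha). A grid-stride loop is one way to let a single launch configuration cover any element count; the sketch below uses a hypothetical kernel for illustration.

#include <cuda_runtime.h>

// Hypothetical kernel showing a grid-stride loop: each thread advances by the
// total number of launched threads, so any <<<grid, block>>> shape covers all
// n*n cells, including sizes where n*n is not a multiple of the block count.
__global__ void scale_field(int n, double *field, double factor) {
    int total = n * n;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < total;
         i += blockDim.x * gridDim.x) {
        field[i] *= factor;                    // per-cell work goes here
    }
}
// Usage: scale_field<<<48, 256>>>(N, da, 0.5); // valid for any N, not only 128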
9b42e7354266722a41b840fc24a36ea2d6426992.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include "cudacheck.h" __constant__ int ConstMem[2]; __global__ void SimulatorUpdateMagneticFieldsKernel(float* hz, float* ex, float* ey, float* Chzh, float* Chzex, float* Chzey) { int indx = threadIdx.x + blockIdx.x * blockDim.x; int indy = threadIdx.y + blockIdx.y * blockDim.y; int offset = (indx + 16) + ConstMem[0] * (indy + 16); int offsetC = indx + ConstMem[1] * indy; int offsetyp = offset + ConstMem[0]; int offsetxp = offset + 1; hz[offset] = Chzh[offsetC] * hz[offset] + Chzex[offsetC] * (ex[offsetyp] - ex[offset]) + Chzey[offsetC] * (ey[offsetxp] - ey[offset]); } __global__ void SimulatorUpdateElectricFieldsKernel(float* hz, float* ex, float* ey, float* Cexe, float* Cexhz, float* Ceye, float* Ceyhz) { int indx = threadIdx.x + blockIdx.x * blockDim.x; int indy = threadIdx.y + blockIdx.y * blockDim.y; int offset = (indx + 16) + ConstMem[0] * (indy + 16); int offsetC = indx + ConstMem[1] * indy; int offsetyn = offset - ConstMem[0]; int offsetxn = offset - 1; ex[offset] = Cexe[offsetC] * ex[offset] + Cexhz[offsetC] * (hz[offset] - hz[offsetyn]); ey[offset] = Ceye[offsetC] * ey[offset] + Ceyhz[offsetC] * (hz[offset] - hz[offsetxn]); } __global__ void Check(float* data, int Mconst) { int indx = threadIdx.x + blockIdx.x * blockDim.x; int indy = threadIdx.y + blockIdx.y * blockDim.y; int offsetC = indx + Mconst * indy; if (data[offsetC] != 1.0f) printf("Error! ThreadId is %d , %d\n value is %f", indx, indy, data[offsetC]); } void SimulatorUpdateMagneticFieldsInterface(float* Hz, float* Ex, float* Ey, float* Chzh, float* Chzex, float* Chzey, dim3 Grids, dim3 Threads) { SimulatorUpdateMagneticFieldsKernel << <Grids, Threads >> > (Hz, Ex, Ey, Chzh, Chzex, Chzey); } void SimulatorUpdateElectricFieldsInterface(float* Hz, float* Ex, float* Ey, float* Cexe, float* Cexhz, float* Ceye, float* Ceyhz, dim3 Grids, dim3 Threads) { SimulatorUpdateElectricFieldsKernel << <Grids, Threads >> > (Hz, Ex, Ey, Cexe, Cexhz, Ceye, Ceyhz); } void SimulatorSetConstInterface(int* src, int num) { static bool Done = false; if (!Done) { CUDACheck(hipMemcpyToSymbol(ConstMem, src, num * sizeof(int))); Done = true; } }
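The update kernels above index the field arrays through ConstMem[0] (which appears to be the padded row pitch, consistent with the +16 halo offsets) and the coefficient arrays through ConstMem[1]; SimulatorSetConstInterface copies both into __constant__ memory exactly once. The sketch below is one possible host call sequence for these interfaces; the pitch interpretation, grid shape, and the assumption that every pointer is a device allocation already padded by the 16-cell halo are illustrative, not taken from this file.

// Hypothetical driver for the interfaces defined above (innerX, innerY assumed
// multiples of 16; all pointers assumed to be halo-padded device allocations).
void StepFields(float *Hz, float *Ex, float *Ey,
                float *Chzh, float *Chzex, float *Chzey,
                float *Cexe, float *Cexhz, float *Ceye, float *Ceyhz,
                int innerX, int innerY, int paddedPitch, int steps) {
    int pitches[2] = { paddedPitch, innerX };  // candidate values for ConstMem[0], ConstMem[1]
    SimulatorSetConstInterface(pitches, 2);    // copied to __constant__ memory once
    dim3 Threads(16, 16);
    dim3 Grids(innerX / 16, innerY / 16);
    for (int t = 0; t < steps; ++t) {
        SimulatorUpdateMagneticFieldsInterface(Hz, Ex, Ey, Chzh, Chzex, Chzey, Grids, Threads);
        SimulatorUpdateElectricFieldsInterface(Hz, Ex, Ey, Cexe, Cexhz, Ceye, Ceyhz, Grids, Threads);
    }
}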
9b42e7354266722a41b840fc24a36ea2d6426992.cu
#include <cuda_runtime.h> #include <stdio.h> #include "cudacheck.h" __constant__ int ConstMem[2]; __global__ void SimulatorUpdateMagneticFieldsKernel(float* hz, float* ex, float* ey, float* Chzh, float* Chzex, float* Chzey) { int indx = threadIdx.x + blockIdx.x * blockDim.x; int indy = threadIdx.y + blockIdx.y * blockDim.y; int offset = (indx + 16) + ConstMem[0] * (indy + 16); int offsetC = indx + ConstMem[1] * indy; int offsetyp = offset + ConstMem[0]; int offsetxp = offset + 1; hz[offset] = Chzh[offsetC] * hz[offset] + Chzex[offsetC] * (ex[offsetyp] - ex[offset]) + Chzey[offsetC] * (ey[offsetxp] - ey[offset]); } __global__ void SimulatorUpdateElectricFieldsKernel(float* hz, float* ex, float* ey, float* Cexe, float* Cexhz, float* Ceye, float* Ceyhz) { int indx = threadIdx.x + blockIdx.x * blockDim.x; int indy = threadIdx.y + blockIdx.y * blockDim.y; int offset = (indx + 16) + ConstMem[0] * (indy + 16); int offsetC = indx + ConstMem[1] * indy; int offsetyn = offset - ConstMem[0]; int offsetxn = offset - 1; ex[offset] = Cexe[offsetC] * ex[offset] + Cexhz[offsetC] * (hz[offset] - hz[offsetyn]); ey[offset] = Ceye[offsetC] * ey[offset] + Ceyhz[offsetC] * (hz[offset] - hz[offsetxn]); } __global__ void Check(float* data, int Mconst) { int indx = threadIdx.x + blockIdx.x * blockDim.x; int indy = threadIdx.y + blockIdx.y * blockDim.y; int offsetC = indx + Mconst * indy; if (data[offsetC] != 1.0f) printf("Error! ThreadId is %d , %d\n value is %f", indx, indy, data[offsetC]); } void SimulatorUpdateMagneticFieldsInterface(float* Hz, float* Ex, float* Ey, float* Chzh, float* Chzex, float* Chzey, dim3 Grids, dim3 Threads) { SimulatorUpdateMagneticFieldsKernel << <Grids, Threads >> > (Hz, Ex, Ey, Chzh, Chzex, Chzey); } void SimulatorUpdateElectricFieldsInterface(float* Hz, float* Ex, float* Ey, float* Cexe, float* Cexhz, float* Ceye, float* Ceyhz, dim3 Grids, dim3 Threads) { SimulatorUpdateElectricFieldsKernel << <Grids, Threads >> > (Hz, Ex, Ey, Cexe, Cexhz, Ceye, Ceyhz); } void SimulatorSetConstInterface(int* src, int num) { static bool Done = false; if (!Done) { CUDACheck(cudaMemcpyToSymbol(ConstMem, src, num * sizeof(int))); Done = true; } }
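SimulatorSetConstInterface above wraps cudaMemcpyToSymbol in CUDACheck from "cudacheck.h", which is not included in this listing. Such helpers are commonly a thin macro that prints cudaGetErrorString and aborts on failure; the definition below is a plausible sketch under that assumption, not the project's actual header.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Plausible CUDACheck-style helper (assumption: the real cudacheck.h may differ).
// It evaluates the call once, then prints the failing expression, file and line
// together with the runtime's error string before aborting.
#define CUDACheck(call)                                                         \
    do {                                                                        \
        cudaError_t err__ = (call);                                             \
        if (err__ != cudaSuccess) {                                             \
            std::fprintf(stderr, "CUDA error %s at %s:%d: %s\n",                \
                         #call, __FILE__, __LINE__, cudaGetErrorString(err__)); \
            std::exit(EXIT_FAILURE);                                            \
        }                                                                       \
    } while (0)

// Usage, mirroring the file: CUDACheck(cudaMemcpyToSymbol(ConstMem, src, num * sizeof(int)));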
f3f4775a10ff82bf47d9a215632281849408b1db.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * * OHIO STATE UNIVERSITY SOFTWARE DISTRIBUTION LICENSE * * Load-balanced sparse MTTKRP on GPUs (the Software) Copyright (c) 2019, The Ohio State * University. All rights reserved. * * The Software is available for download and use subject to the terms and * conditions of this License. Access or use of the Software constitutes acceptance * and agreement to the terms and conditions of this License. Redistribution and * use of the Software in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the capitalized paragraph below. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the capitalized paragraph below in the documentation * and/or other materials provided with the distribution. * * 3. The names of Ohio State University, or its faculty, staff or students may not * be used to endorse or promote products derived from the Software without * specific prior written permission. * * THIS SOFTWARE HAS BEEN APPROVED FOR PUBLIC RELEASE, UNLIMITED DISTRIBUTION. THE * SOFTWARE IS PROVIDED AS IS AND WITHOUT ANY EXPRESS, IMPLIED OR STATUTORY * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF ACCURACY, COMPLETENESS, * NONINFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. ACCESS OR USE OF THE SOFTWARE IS ENTIRELY AT THE USERS RISK. IN * NO EVENT SHALL OHIO STATE UNIVERSITY OR ITS FACULTY, STAFF OR STUDENTS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE SOFTWARE * USER SHALL INDEMNIFY, DEFEND AND HOLD HARMLESS OHIO STATE UNIVERSITY AND ITS * FACULTY, STAFF AND STUDENTS FROM ANY AND ALL CLAIMS, ACTIONS, DAMAGES, LOSSES, * LIABILITIES, COSTS AND EXPENSES, INCLUDING ATTORNEYS FEES AND COURT COSTS, * DIRECTLY OR INDIRECTLY ARISING OUT OF OR IN CONNECTION WITH ACCESS OR USE OF THE * SOFTWARE. 
* */ /** * * Author: * Israt Nisa ([email protected]) * * Contacts: * Israt Nisa ([email protected]) * Jiajia Li ([email protected]) * */ #include <iostream> #include "mttkrp_gpu.h" #include <vector> inline hipError_t checkCuda(hipError_t result, int s){ if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error in line : %s - %d\n", hipGetErrorString(result), s); assert(result == hipSuccess); } return result; } void cuda_timer_start(hipEvent_t start){ checkCuda(hipEventRecord(start), __LINE__); } void cuda_timer_stop(hipEvent_t start, hipEvent_t stop, float &mili){ checkCuda(hipEventRecord(stop), __LINE__); hipEventSynchronize(stop); checkCuda(hipEventElapsedTime(&mili, start, stop), __LINE__); hipDeviceSynchronize(); } // CUDA kernel call to do COO MTTKRP __global__ void mttkrp_COO_kernel(DTYPE *vals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2, ITYPE nnz, DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, ITYPE mode, ITYPE R){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int x = gId >> 5; if(x < nnz){ DTYPE tmp_val = 0; ITYPE idx0 = dInds0[x]; ITYPE idx1 = dInds1[x]; ITYPE idx2 = dInds2[x]; for(ITYPE r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } // CUDA kernel call to do COO MTTKRP using loop __global__ void mttkrp_COO_kernel_loop(DTYPE * const vals, ITYPE * const dInds0, ITYPE * const dInds1, ITYPE * const dInds2, const ITYPE nnz, DTYPE *dU0, DTYPE * const dU1, DTYPE * const dU2, ITYPE mode, ITYPE R){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); //like PARTI size_t num_loops_nnz = 1 * 32; size_t const nnz_per_loop = gridDim.x * blockDim.x; if(nnz > nnz_per_loop) { num_loops_nnz = ((nnz + nnz_per_loop - 1) / nnz_per_loop) << 5; } unsigned int x; for(size_t nl=0; nl<num_loops_nnz; ++nl) { x = (gId + nl * nnz_per_loop) >> 5; if(x < nnz){ DTYPE tmp_val = 0; ITYPE idx0 = dInds0[x]; ITYPE idx1 = dInds1[x]; ITYPE idx2 = dInds2[x]; for(ITYPE r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp_val); } } __syncthreads(); } } // CUDA kernel call to do COO MTTKRP 4D __global__ void mttkrp_COO_kernel_4D(DTYPE *vals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2, ITYPE *dInds3, ITYPE nnz, DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int x = gId >> 5; if(x < nnz){ DTYPE tmp_val = 0; ITYPE idx0 = dInds0[x]; ITYPE idx1 = dInds1[x]; ITYPE idx2 = dInds2[x]; ITYPE idx3 = dInds3[x]; for(ITYPE r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] * dU3[idx3 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } // CUDA kernel call to do COO MTTKRP 4D using loop __global__ void mttkrp_COO_kernel_4D_loop(DTYPE *const vals, ITYPE * const dInds0, ITYPE * const dInds1, ITYPE *const dInds2, ITYPE * const dInds3, ITYPE nnz, DTYPE *dU0, DTYPE * const dU1, DTYPE * const dU2, DTYPE * const dU3, ITYPE mode, ITYPE R){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); //like PARTI size_t num_loops_nnz = 1 * 32; size_t const nnz_per_loop = gridDim.x * blockDim.x; if(nnz > nnz_per_loop) { num_loops_nnz = ((nnz + nnz_per_loop - 1) / nnz_per_loop) << 5; } unsigned int x; for(size_t nl=0; 
nl<num_loops_nnz; ++nl) { x = (gId + nl * nnz_per_loop) >> 5; if(x < nnz){ DTYPE tmp_val = 0; ITYPE idx0 = dInds0[x]; ITYPE idx1 = dInds1[x]; ITYPE idx2 = dInds2[x]; ITYPE idx3 = dInds3[x]; for(ITYPE r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] * dU3[idx3 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp_val); } } __syncthreads(); } } //no atomics because all 1 in HYB - COO __global__ void mttkrp_HYB_COO_kernel(DTYPE *vals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2, ITYPE nnz, DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, ITYPE mode, ITYPE R){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int x = gId >> 5; if(x < nnz){ DTYPE tmp_val = 0; ITYPE idx0 = dInds0[x]; ITYPE idx1 = dInds1[x]; ITYPE idx2 = dInds2[x]; for(ITYPE r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r]; dU0[idx0 * R + r] += tmp_val; } } } // CUDA kernel call to do COO MTTKRP using loop __global__ void mttkrp_HYB_COO_kernel_loop(DTYPE * const vals, ITYPE * const dInds0, ITYPE * const dInds1, ITYPE * const dInds2, const ITYPE nnz, DTYPE *dU0, DTYPE * const dU1, DTYPE * const dU2, ITYPE mode, ITYPE R){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); //like PARTI size_t num_loops_nnz = 1 * 32; size_t const nnz_per_loop = gridDim.x * blockDim.x; if(nnz > nnz_per_loop) { num_loops_nnz = ((nnz + nnz_per_loop - 1) / nnz_per_loop) << 5; } unsigned int x; for(size_t nl=0; nl<num_loops_nnz; ++nl) { x = (gId + nl * nnz_per_loop) >> 5; if(x < nnz){ DTYPE tmp_val = 0; ITYPE idx0 = dInds0[x]; ITYPE idx1 = dInds1[x]; ITYPE idx2 = dInds2[x]; for(ITYPE r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r]; dU0[idx0 * R + r] += tmp_val; } } __syncthreads(); } } //no atomics because all 1 in HYB - COO __global__ void mttkrp_HYB_COO_kernel_4D(DTYPE *vals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2, ITYPE *dInds3, ITYPE nnz, DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int x = gId >> 5; if(x < nnz){ DTYPE tmp_val = 0; ITYPE idx0 = dInds0[x]; ITYPE idx1 = dInds1[x]; ITYPE idx2 = dInds2[x]; ITYPE idx3 = dInds3[x]; for(ITYPE r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] * dU3[idx3 * R + r]; dU0[idx0 * R + r] += tmp_val; } } } // CUDA kernel call to do COO MTTKRP 4D using loop __global__ void mttkrp_HYB_COO_kernel_4D_loop(DTYPE *const vals, ITYPE * const dInds0, ITYPE * const dInds1, ITYPE *const dInds2, ITYPE * const dInds3, ITYPE nnz, DTYPE *dU0, DTYPE * const dU1, DTYPE * const dU2, DTYPE * const dU3, ITYPE mode, ITYPE R){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); //like PARTI size_t num_loops_nnz = 1 * 32; size_t const nnz_per_loop = gridDim.x * blockDim.x; if(nnz > nnz_per_loop) { num_loops_nnz = ((nnz + nnz_per_loop - 1) / nnz_per_loop) << 5; } unsigned int x; for(size_t nl=0; nl<num_loops_nnz; ++nl) { x = (gId + nl * nnz_per_loop) >> 5; if(x < nnz){ DTYPE tmp_val = 0; ITYPE idx0 = dInds0[x]; ITYPE idx1 = dInds1[x]; ITYPE idx2 = dInds2[x]; ITYPE idx3 = dInds3[x]; for(ITYPE r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] * dU3[idx3 * R + r]; dU0[idx0 * R + r] += tmp_val; } } __syncthreads(); } } __global__ void 
mttkrp_CSL_kernel(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *dInds1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE tmp_val; if(slc < nSlices){ unsigned int mappedSlc = slc;//dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc]; int fb_st = fbrPtr0[mappedSlc]; int fb_end = fbrPtr0[mappedSlc+1]; tmp_val = 0; for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){ unsigned int idx1 = dInds1[fbr]; unsigned int idx2 = dInds2[fbr]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[fbr] * dU2[idx2 * R + r] * dU1[idx1 * R + r]; } } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } __global__ void mttkrp_CSL_kernel_bin(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *dInds1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc]; int fb_st = fbrPtr0[mappedSlc]; int fb_end = fbrPtr0[mappedSlc+1]; tmp_val = 0; for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){ unsigned int idx1 = dInds1[fbr]; unsigned int idx2 = dInds2[fbr]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[fbr] * dU2[idx2 * R + r] * dU1[idx1 * R + r]; } } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } // CSL kernel with loop like ParTI __global__ void mttkrp_CSL_kernel_bin_loop(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *dInds1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE tmp_val; //like PARTI size_t num_loops_nnz = 1 * 32; size_t const nnz_per_loop = gridDim.x * blockDim.x; if(nSlices > nnz_per_loop) { num_loops_nnz = ((nSlices + nnz_per_loop - 1) / nnz_per_loop) << 5; } for(size_t nl=0; nl<num_loops_nnz; ++nl) { slc = (gId + nl * nnz_per_loop) >> 5; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc]; int fb_st = fbrPtr0[mappedSlc]; int fb_end = fbrPtr0[mappedSlc+1]; tmp_val = 0; for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){ unsigned int idx1 = dInds1[fbr]; unsigned int idx2 = dInds2[fbr]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[fbr] * dU2[idx2 * R + r] * dU1[idx1 * R + r]; } } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp_val); } } __syncthreads(); } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_CSL_kernel_hvyBin(DTYPE * vals, ITYPE 
*dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *dInds1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){ unsigned int laneId = threadIdx.x & 31; unsigned int workId = threadIdx.x >> 5; unsigned int slc = blockIdx.x >> logOfTPS; unsigned int localBId = blockIdx.x & (TbPerSlc -1); DTYPE tmp = 0, tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc; unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc]; unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS; unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ; unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ; tmp_val = 0; for (int fbr = fb_st + workId; fbr < fb_end && fbr < fbrPtr0[mappedSlc+1]; fbr+=warpPerSlice){ unsigned int idx1 = dInds1[fbr]; unsigned int idx2 = dInds2[fbr]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[fbr] * dU2[idx2 * R + r] * dU1[idx1 * R + r]; } } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } // HCSR MTTKRP : 16 WARP = 1 TB per slice __global__ void mttkrp_HCSR_kernel_16WARP(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = tId >> 5; //(tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = blockIdx.x ;//gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE tmp = 0; DTYPE tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc; int fb_st = fbrPtr0[mappedSlc]; int fb_end = fbrPtr0[mappedSlc+1]; for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){ tmp_val = 0; for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) { unsigned int idx2 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU2[idx2 * R + r]; } } // unsigned int idx1 = dInds1[fbrPtr1[fbr]]; unsigned int idx1 = fbrIdx1[fbr]; for(unsigned int r=laneId; r<R; r+=32) { tmp += tmp_val * dU1[idx1 * R + r] ; // C matrix } } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp); } } } // CUDA kernel call to do HCSR MTTKRP for the first bin 1 WARP per slice __global__ void mttkrp_HCSR_kernel_COO(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int slc = gId >> 5; // 5: minimum 1 WARP (2^5) DTYPE tmp = 0, tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc; int fb_st = fbrPtr0[mappedSlc]; int fb_end = fbrPtr0[mappedSlc+1]; for (int fbr = fb_st; fbr < fb_end; fbr++){ tmp_val = 0; for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) { unsigned int idx2 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU2[idx2 * R + r]; } } unsigned int idx1 = fbrIdx1[fbr]; for(unsigned int r=laneId; r<R; r+=32) { dU0[idx0 * R + r] += tmp_val 
* dU1[idx1 * R + r] ; } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_HCSR_kernel_smllBin(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // unsigned int slcPerTb = 16/warpPerSlice; // unsigned int shSlc = slc & slcPerTb; DTYPE tmp = 0, tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc; int fb_st = fbrPtr0[mappedSlc]; int fb_end = fbrPtr0[mappedSlc+1]; for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){ tmp_val = 0; for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) { unsigned int idx2 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU2[idx2 * R + r]; } } unsigned int idx1 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) { tmp += tmp_val * dU1[idx1 * R + r] ; } } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp); } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_HCSR_kernel_smllBin_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE outbuffer = 0, tmp_val = 0, outbuffer1 = 0; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc; for (int fbrS = fbrPtr0[mappedSlc]; fbrS < fbrPtr0[mappedSlc+1]; fbrS++){ unsigned int idx1 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; outbuffer1 = 0; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx2 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx3 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU3[idx3 * R + r]; } for(unsigned int r=laneId; r<R; r+=32) outbuffer1 += tmp_val * dU2[idx2 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) outbuffer += outbuffer1 * dU1[idx1 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], outbuffer); } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_HCSR_kernel_hvyBin(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){ unsigned int laneId = threadIdx.x & 31; unsigned int workId = threadIdx.x >> 5; unsigned int slc = blockIdx.x >> logOfTPS; unsigned int localBId = blockIdx.x & (TbPerSlc -1); DTYPE tmp = 0, tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc; unsigned int 
nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc]; unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS; unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ; unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ; for (int fbr = fb_st + workId; fbr < fb_end && fbr < fbrPtr0[mappedSlc+1] ; fbr+=warpPerSlice){ tmp_val = 0; for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) { unsigned int idx2 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU2[idx2 * R + r]; } } unsigned int idx1 = fbrIdx1[fbr];//dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) { tmp += tmp_val * dU1[idx1 * R + r] ; // // atomicAdd(&dU0[idx0 * R + r], tmp); } } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp); } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_HCSR_kernel_hvyBin_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){ unsigned int laneId = threadIdx.x & 31; unsigned int workId = threadIdx.x >> 5; unsigned int slc = blockIdx.x >> logOfTPS; unsigned int localBId = blockIdx.x & (TbPerSlc -1); DTYPE outbuffer = 0, tmp_val = 0, outbuffer1 = 0;; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc; unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc]; unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS; unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ; unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ; for (int fbrS = fb_st; fbrS < fb_end && fbrS < fbrPtr0[mappedSlc+1] ; fbrS++){ unsigned int idx1 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; outbuffer1 = 0; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx2 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx3 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU3[idx3 * R + r]; } for(unsigned int r=laneId; r<R; r+=32) outbuffer1 += tmp_val * dU2[idx2 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) outbuffer += outbuffer1 * dU1[idx1 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], outbuffer); } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int fbrPerWarp, int logOfFPW){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbr = (gId >> (5 + logOfWPC)) << logOfFPW; // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val; if(fbr < nFibers - 1){ tmp_val = 0; bool diffFiber = false; unsigned int idx0; for (int fr = 0; fr < fbrPerWarp && (fbr+fr) < (nFibers - 1); ++fr){ diffFiber = false; unsigned int idx1 = fbrIdx1[fbr+fr];// dInds1[fbrPtr1[fbr]]; idx0 = fbrLikeSlcInds[fbr+fr];//slc; tmp_val = 0; for(unsigned int x = fbrPtr1[fbr+fr] + workId; x < fbrPtr1[fbr+fr+1]; x+=warpPerSlice) { unsigned int idx2 = dInds2[x]; for(unsigned int 
r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU2[idx2 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp += tmp_val * dU1[idx1 * R + r] ; //2PR } if(fbrLikeSlcInds[fbr+fr] != fbrLikeSlcInds[fbr+fr+1]) { diffFiber = true; for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp); //2PR } tmp = 0; } } if(!diffFiber) { for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp); } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int fbrPerWarp, int logOfFPW){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = (gId >> (5 + logOfWPC)) << logOfFPW; // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val, tmp2= 0; if(fbrS < nFibers - 1){ tmp_val = 0; bool diffFiber = false; unsigned int idx0; for (int fr = 0; fr < fbrPerWarp && (fbrS+fr) < (nFibers - 1); ++fr){ diffFiber = false; unsigned int idx1 = fbrIdx1[fbrS+fr];// dInds1[fbrPtr1[fbr]]; idx0 = fbrLikeSlcInds[fbrS+fr];//slc; tmp = 0; for (int fbr = fbrPtr1[fbrS+fr] + workId; fbr < fbrPtr1[fbrS+fr+1]; fbr+=warpPerSlice){ ITYPE idx2 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; x++) { unsigned int idx3 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU3[idx3 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp += tmp_val * dU2[idx2 * R + r] ; } } for(unsigned int r=laneId; r<R; r+=32) { tmp2 += tmp * dU1[idx1 * R + r] ; } if(fbrLikeSlcInds[fbrS+fr] != fbrLikeSlcInds[fbrS+fr+1]) { diffFiber = true; for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } tmp2 = 0; } } if(!diffFiber) { for(unsigned int r=laneId; r<R; r+=32) atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_smllBin_fbr_atomic(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; ITYPE slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE tmp = 0, tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx2 = dfbrIdx0[mappedSlc] ;//slc; int fb_st = fbrPtr0[mappedSlc]; int fb_end = fbrPtr0[mappedSlc+1]; for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){ tmp_val = 0; unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) { unsigned int idx1 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp = tmp_val * dU2[idx2 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp); //2PR } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_smllBin_fbr_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE 
*dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE outbuffer = 0, tmp_val = 0, tmp = 0; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx2 = dfbrIdx0[mappedSlc] ;//slc; for (int fbrS = fbrPtr0[mappedSlc]; fbrS < fbrPtr0[mappedSlc+1]; fbrS++){ unsigned int idx3 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; tmp = 0; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx0 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx1 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU1[idx1 * R + r]; } for(unsigned int r=laneId; r<R; r+=32) { tmp = tmp_val * dU2[idx2 * R + r] * dU3[idx3 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp); } } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_hvyBin_fbr_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){ unsigned int laneId = threadIdx.x & 31; unsigned int workId = threadIdx.x >> 5; unsigned int slc = blockIdx.x >> logOfTPS; unsigned int localBId = blockIdx.x & (TbPerSlc -1); DTYPE outbuffer = 0, tmp_val = 0, tmp = 0;; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx2 = dfbrIdx0[mappedSlc] ;//slc; unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc]; unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS; unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ; unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ; for (int fbrS = fb_st; fbrS < fb_end && fbrS < fbrPtr0[mappedSlc+1] ; fbrS++){ unsigned int idx3 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; tmp = 0; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx0 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx1 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU1[idx1 * R + r]; } for(unsigned int r=laneId; r<R; r+=32) { tmp = tmp_val * dU2[idx2 * R + r] * dU3[idx3 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp); } } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_smllBin_fbrS_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE tmp = 0, tmp_val, tmp2 = 0; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; 
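        // MI-HCSR "fbrS atomic" 4D kernel: dSlcMapperBin maps this bin-local slice id to
        // its real slice position. In the loops that follow, the output row of dU0 is
        // taken from fbrIdx1 (idx0), the inner fiber index feeds dU1, the nonzero index
        // feeds dU2, and the slice index (idx3) only contributes the dU3 factor at the
        // end; partial products are committed with atomicAdd because different slices
        // and warps can update the same output row.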
unsigned int idx3 = dfbrIdx0[mappedSlc] ;//slc; for (int fbrS = fbrPtr0[mappedSlc]; fbrS < fbrPtr0[mappedSlc+1]; fbrS++){ unsigned int idx0 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; tmp = 0; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx1 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx2 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU2[idx2 * R + r]; } for(unsigned int r=laneId; r<R; r+=32) tmp += tmp_val * dU1[idx1 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) { tmp2 = tmp * dU3[idx3 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_hvyBin_fbrS_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){ unsigned int laneId = threadIdx.x & 31; unsigned int workId = threadIdx.x >> 5; unsigned int slc = blockIdx.x >> logOfTPS; unsigned int localBId = blockIdx.x & (TbPerSlc -1); DTYPE tmp = 0, tmp_val, tmp2 = 0; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx3 = dfbrIdx0[mappedSlc] ;//slc; unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc]; unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS; unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ; unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ; for (int fbrS = fb_st; fbrS < fb_end && fbrS < fbrPtr0[mappedSlc+1] ; fbrS++){ unsigned int idx0 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; tmp = 0; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx1 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx2 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU2[idx2 * R + r]; } for(unsigned int r=laneId; r<R; r+=32) tmp += tmp_val * dU1[idx1 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) { tmp2 = tmp * dU3[idx3 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbr = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val; if(fbr < nFibers - 1){ tmp_val = 0; unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; unsigned int idx2 = fbrLikeSlcInds[fbr];//slc; for(unsigned int x = fbrPtr1[fbr] + workId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) { unsigned int idx1 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp = tmp_val * dU2[idx2 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp); //2PR } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE 
*fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val, tmp2 = 0; if(fbrS < nFibers - 1){ tmp = 0; unsigned int idx2 = fbrLikeSlcInds[fbrS];//slc; unsigned int idx3 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ unsigned int idx0 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx1 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR // if(laneId == 0) // printf("from GPU: (%d %d %d %d) - %f %f %f %f \n", idx0, idx1, idx2, idx3, dU0[idx0 * R] , dU1[idx1 * R], dU2[idx2 * R], dU3[idx3 * R]); } for(unsigned int r=laneId; r<R; r+=32) { tmp = tmp_val * dU2[idx2 * R + r] * dU3[idx3 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp); } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val, tmp2 = 0; if(fbrS < nFibers - 1){ tmp = 0; unsigned int idx0 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; unsigned int idx3 = fbrLikeSlcInds[fbrS];//slc; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ unsigned int idx1 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx2 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU2[idx2 * R + r] ; //2MR } for(unsigned int r=laneId; r<R; r+=32) tmp += tmp_val * dU1[idx1 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) { tmp2 = tmp * dU3[idx3 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_loop(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); //like PARTI //hardcoded for 1 warp per nnz size_t num_loops_fbr = 1 * 32; size_t const fbr_per_loop = gridDim.x * blockDim.x; if(nFibers > fbr_per_loop) { num_loops_fbr = ((nFibers + fbr_per_loop - 1) / fbr_per_loop) << 5; } DTYPE tmp = 0, tmp_val; unsigned int fbr; for(size_t nl=0; nl<num_loops_fbr; ++nl) { fbr = (gId + nl * fbr_per_loop) >> 5; if(fbr < nFibers - 1){ tmp_val = 0; unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; unsigned int idx2 = fbrLikeSlcInds[fbr];//slc; for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; x++) { unsigned int idx1 = dInds2[x]; 
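                // Warp-strided rank loop below: lane l handles columns r = l, l + 32, ...
                // of the rank-R factors (with R = 32 each lane owns exactly one column,
                // with R = 64 it owns two). tmp_val accumulates vals[x] * dU1[idx1*R + r]
                // over the fiber's nonzeros; the fiber total is then scaled by
                // dU2[idx2*R + r] and added to row idx0 of dU0 with one atomicAdd per
                // rank element, since fibers owned by other warps may map to the same row.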
                for(unsigned int r=laneId; r<R; r+=32) {
                    tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR
                }
            }

            for(unsigned int r=laneId; r<R; r+=32) {
                tmp = tmp_val * dU2[idx2 * R + r] ;
                atomicAdd(&dU0[idx0 * R + r], tmp); //2PR
            }
        }
    }
}

// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_hvyBin_fbr_atomic(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
    ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
    ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){

    ITYPE laneId = threadIdx.x & 31;
    ITYPE workId = threadIdx.x >> 5;
    ITYPE slc = blockIdx.x >> logOfTPS;
    ITYPE localBId = blockIdx.x & (TbPerSlc -1);

    DTYPE tmp = 0, tmp_val;

    if(slc < nSlices){

        unsigned int mappedSlc = dSlcMapperBin[slc];
        unsigned int idx2 = dfbrIdx0[mappedSlc] ;//slc;
        unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
        unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
        unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
        unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;

        for (int fbr = fb_st + workId; fbr < fb_end && fbr < fbrPtr0[mappedSlc+1]; fbr+=warpPerSlice){
            tmp_val = 0;
            unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];

            for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {

                unsigned int idx1 = dInds2[x];
                for(unsigned int r=laneId; r<R; r+=32) {
                    tmp_val += vals[x] * dU1[idx1 * R + r];
                }
            }

            for(unsigned int r=laneId; r<R; r+=32) {
                tmp = tmp_val * dU2[idx2 * R + r] ;
                atomicAdd(&dU0[idx0 * R + r], tmp);
            }
        }
    }
}

// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_smllBin_all_atomic(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
    ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
    ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){

    ITYPE tId = threadIdx.x;
    ITYPE laneId = tId & 31;
    ITYPE bdim = blockDim.x;
    ITYPE gId = (blockIdx.x * bdim + tId);
    ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
    ITYPE slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
    // ITYPE slcPerTb = 16/warpPerSlice;
    // ITYPE shSlc = slc & slcPerTb;
    DTYPE tmp_val;

    if(slc < nSlices){

        unsigned int mappedSlc = dSlcMapperBin[slc];
        unsigned int idx1 = dfbrIdx0[mappedSlc] ;//slc;
        int fb_st = fbrPtr0[mappedSlc];
        int fb_end = fbrPtr0[mappedSlc+1];

        for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){

            unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];

            // for(unsigned int r=laneId; r<R; r+=32)
            //     tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR

            for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {

                unsigned int idx0 = dInds2[x];
                for(unsigned int r=laneId; r<R; r+=32) {
                    tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] ;
                    atomicAdd(&dU0[idx0 * R + r], tmp_val); //2MR
                    // atomicAdd(&dU0[idx0 * R + r], (tmp_val * vals[x]) );
                }
            }
        }
    }
}

// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_hvyBin_all_atomic(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
    ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
    ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){

    ITYPE laneId = threadIdx.x & 31;
    ITYPE workId = threadIdx.x >> 5;
    ITYPE slc = blockIdx.x >> logOfTPS;
    ITYPE localBId = blockIdx.x & (TbPerSlc -1);

    DTYPE tmp = 0, tmp_val;

    if(slc < nSlices){

        unsigned int mappedSlc = dSlcMapperBin[slc];
        unsigned int idx1 =
dfbrIdx0[mappedSlc] ;//slc; unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc]; unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS; unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ; unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ; for (int fbr = fb_st + workId; fbr < fb_end && fbr < fbrPtr0[mappedSlc+1]; fbr+=warpPerSlice){ tmp_val = 0; unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) { unsigned int idx0 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { // atomicAdd(&dU0[idx0 * R + r], (tmp_val * vals[x]) ); tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_smllBin_all_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE outbuffer = 0, tmp_val = 0, tmp = 0; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx1 = dfbrIdx0[mappedSlc] ;//slc; for (int fbrS = fbrPtr0[mappedSlc]; fbrS < fbrPtr0[mappedSlc+1]; fbrS++){ unsigned int idx2 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx3 = fbrIdx2[fbr]; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx0 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp = vals[x] * dU3[idx3 * R + r] * tmp_val;//2MR atomicAdd(&dU0[idx0 * R + r], tmp); } } } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_hvyBin_all_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){ unsigned int laneId = threadIdx.x & 31; unsigned int workId = threadIdx.x >> 5; unsigned int slc = blockIdx.x >> logOfTPS; unsigned int localBId = blockIdx.x & (TbPerSlc -1); DTYPE tmp = 0, tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx1 = dfbrIdx0[mappedSlc] ;//slc; unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc]; unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS; unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ; unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ; for (int fbrS = fb_st; fbrS < fb_end && fbrS < fbrPtr0[mappedSlc+1] ; fbrS++){ unsigned int idx2 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx3 = fbrIdx2[fbr]; for(unsigned int x = 
fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx0 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp = vals[x] * dU3[idx3 * R + r] * tmp_val;//2MR atomicAdd(&dU0[idx0 * R + r], tmp); } } } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbr = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val; if(fbr < nFibers - 1){ tmp_val = 0; unsigned int idx1 = fbrLikeSlcInds[fbr];//slc; unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; // if(laneId == 0 && idx1 == 0) // printf("GPU %d %d %f %f\n", idx1, idx2, dU1[idx1 * R], dU2[idx2 * R] ); for(unsigned int r=laneId; r<R; r+=32) tmp = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for(unsigned int x = fbrPtr1[fbr] + workId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) { unsigned int idx0 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val = vals[x] * tmp;///dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //2MR atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val = 0;; if(fbrS < nFibers - 1){ tmp = 0; unsigned int idx1 = fbrLikeSlcInds[fbrS];//slc; unsigned int idx2 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx3 = fbrIdx2[fbr]; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx0 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp = vals[x] * dU3[idx3 * R + r] * tmp_val;//2MR atomicAdd(&dU0[idx0 * R + r], tmp); } } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_loop(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE warpId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; // ITYPE blockId = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) //blockIdx.x ;// //like PARTI //hardcoded for 1 warp per nnz size_t num_loops_fbr = 1 * 32; size_t const fbr_per_loop = gridDim.x * blockDim.x; if(nFibers > fbr_per_loop) { num_loops_fbr = ((nFibers + fbr_per_loop - 1) / fbr_per_loop) << 5; } DTYPE tmp = 0, tmp_val; unsigned int fbr; for(size_t nl=0; nl<num_loops_fbr; ++nl) { fbr = (gId 
+ nl * fbr_per_loop) >> 5; if(fbr < nFibers - 1){ tmp_val = 0; unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; unsigned int idx1 = fbrLikeSlcInds[fbr];//slc; for(unsigned int r=laneId; r<R; r+=32) tmp = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for(unsigned int x = fbrPtr1[fbr] + warpId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) { unsigned int idx0 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val = vals[x] * tmp;///dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //2MR atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } } } int MTTKRP_COO_GPU(const Tensor &X, Matrix *U, const Options Opt){ //allocate and memcpy GPU memory //Tensor ITYPE mode = Opt.mode; ITYPE R = Opt.R; ITYPE *dInds0, *dInds1, *dInds2, *dInds3; DTYPE *dVals; ITYPE mode0 = X.modeOrder[0]; ITYPE mode1 = X.modeOrder[1]; ITYPE mode2 = X.modeOrder[2]; checkCuda(hipMalloc((void**) &dVals, X.totNnz * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dInds0, X.totNnz * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dInds1, X.totNnz * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dInds2, X.totNnz * sizeof(ITYPE)), 0); checkCuda(hipMemcpy(dVals, &(X.vals[0]), X.totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dInds0, &(X.inds[mode0][0]), X.totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dInds1, &(X.inds[mode1][0]), X.totNnz * sizeof(ITYPE) ,hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dInds2, &(X.inds[mode2][0]), X.totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); // //Matrices DTYPE *dU0, *dU1, *dU2, *dU3; checkCuda(hipMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0); hipMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(hipMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); if(X.ndims == 4){ ITYPE mode3 = X.modeOrder[3]; checkCuda(hipMalloc((void**) &dInds3, X.totNnz * sizeof(ITYPE)), 0); checkCuda(hipMemcpy(dInds3, &(X.inds[mode3][0]), X.totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMalloc((void**) &dU3, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)), 0); checkCuda(hipMemcpy(dU3, &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); } // BLOCK and GRID int BLOCKSIZE = 128; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float mili = 0; bool useLoop = true; // /* Like PARTI loop */ = if(useLoop) grid.x = 32768; else grid.x = (32 * X.totNnz + BLOCKSIZE - 1) / BLOCKSIZE; // CUDA call cuda_timer_start(start); if(!useLoop){ if(X.ndims == 3) hipLaunchKernelGGL(( mttkrp_COO_kernel), dim3(grid), dim3(block), 0, 0, dVals, dInds0, dInds1, dInds2, X.totNnz, dU0, dU1, dU2, mode, R); else if(X.ndims == 4) hipLaunchKernelGGL(( mttkrp_COO_kernel_4D), dim3(grid), dim3(block), 0, 0, dVals, dInds0, dInds1, dInds2, dInds3, X.totNnz, dU0, dU1, dU2, dU3, mode, R); } // /* loop like ParTI */ else{ if(X.ndims == 3) hipLaunchKernelGGL(( mttkrp_COO_kernel_loop), dim3(grid), dim3(block), 0, 0, dVals, dInds0, dInds1, dInds2, X.totNnz, dU0, dU1, dU2, mode, R ); else if(X.ndims == 4) hipLaunchKernelGGL(( mttkrp_COO_kernel_4D_loop), dim3(grid), dim3(block), 0, 0, 
dVals, dInds0, dInds1, dInds2, dInds3, X.totNnz, dU0, dU1, dU2, dU3, mode, R); } cuda_timer_stop(start, stop, mili); if(useLoop) cout << "Loop on. "; cout << "COO GPU using loop - time " << mili << "ms"<< endl; // check correctness checkCuda(hipMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0); // print_output(U, 0); hipFree(dVals); hipFree(dU0); hipFree(dU1); hipFree(dU2); hipFree(dU3); hipFree(dInds0); hipFree(dInds1); hipFree(dInds2); hipFree(dInds3); return 0; } int MTTKRP_HCSR_GPU(Tensor &X, Matrix *U, const Options &Opt){ //allocate and memcpy GPU memory cout << "FIX fiber idx" << endl; //Tensor ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin; DTYPE *dVals; int logOfWarpPerSlice = log2(Opt.warpPerSlice); int TbPerSlc = 1; int logOfTPS = log2(TbPerSlc); ITYPE mode0 = X.modeOrder[0]; ITYPE mode1 = X.modeOrder[1]; ITYPE mode2 = X.modeOrder[2]; // dummy bin mapper to be compatible with bin mapper when bin are not used X.slcMapperBin.push_back(std::vector<ITYPE>()); for (int s = 0; s < X.fbrIdx[0].size(); ++s) X.slcMapperBin[0].push_back(s); checkCuda(hipMalloc((void**) &dVals, X.totNnz * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dSlcMapperBin, X.slcMapperBin[0].size() * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrIdx0, X.fbrIdx[0].size() * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrPtr0, X.fbrPtr[0].size() * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrPtr1, X.fbrPtr[1].size() * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrIdx1, X.fbrIdx[1].size() * sizeof(ITYPE)), 0); checkCuda(hipMemcpy(dVals, &(X.vals[0]), X.totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dSlcMapperBin, &(X.slcMapperBin[0][0]), X.slcMapperBin[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrPtr0, &(X.fbrPtr[0][0]), X.fbrPtr[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrIdx0, &(X.fbrIdx[0][0]), X.fbrIdx[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrPtr1, &(X.fbrPtr[1][0]), X.fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrIdx1, &(X.fbrIdx[1][0]), X.fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); // //Matrices DTYPE *dU0, *dU1, *dU2, *dU3; checkCuda(hipMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0); hipMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(hipMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); if(X.ndims == 3){ checkCuda(hipMalloc((void**) &dInds2, X.totNnz * sizeof(ITYPE)), 0); checkCuda(hipMemcpy(dInds2, &(X.inds[mode2][0]), X.totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); } if(X.ndims == 4){ ITYPE mode3 = X.modeOrder[3]; checkCuda(hipMalloc((void**) &dFbrIdx2, X.fbrIdx[2].size() * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dFbrPtr2, X.fbrPtr[2].size() * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dInds3, X.totNnz * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dU3, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)), 0); checkCuda(hipMemcpy(dFbrPtr2, 
&(X.fbrPtr[2][0]), X.fbrPtr[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dFbrIdx2, &(X.fbrIdx[2][0]), X.fbrIdx[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dInds3, &(X.inds[mode3][0]), X.totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dU3, &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); } // BLOCK and GRID int BLOCKSIZE = 512; if(Opt.warpPerSlice * 32 > BLOCKSIZE){ cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl; exit(0); } dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); grid.x = (Opt.warpPerSlice * 32 * X.dims[mode0] + BLOCKSIZE - 1) / BLOCKSIZE; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float mili = 0; checkCuda(hipEventRecord(start), __LINE__); // mttkrp_HCSR_kernel_COO<<<grid, block, 32 * sizeof(DTYPE)>>>(dVals, dfbrIdx0, dSlcMapperBin, dInds2, dfbrPtr0, dfbrPtr1, dfbrIdx1, // X.fbrIdx[0].size(), dU0, dU1, dU2,Opt.mode, Opt.R, Opt.warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS); if(X.ndims == 3) hipLaunchKernelGGL(( mttkrp_HCSR_kernel_smllBin), dim3(grid), dim3(block), 32 * sizeof(DTYPE), 0, dVals, dfbrIdx0, dSlcMapperBin, dInds2, dfbrPtr0, dfbrPtr1, dfbrIdx1, X.fbrIdx[0].size(), dU0, dU1, dU2,Opt.mode, Opt.R, Opt.warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS); else hipLaunchKernelGGL(( mttkrp_HCSR_kernel_smllBin_4D), dim3(grid), dim3(block), 32 * sizeof(DTYPE), 0, dVals, dfbrIdx0, dSlcMapperBin, dInds3, dfbrPtr0, dfbrPtr1, dfbrIdx1, dFbrPtr2, dFbrIdx2, X.fbrIdx[0].size(), dU0, dU1, dU2, dU3, Opt.mode, Opt.R, Opt.warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS); checkCuda(hipEventRecord(stop), __LINE__); hipEventSynchronize(stop); checkCuda(hipEventElapsedTime(&mili, start, stop), __LINE__); hipDeviceSynchronize(); cout << "HCSR GPU - time " << mili << "ms"<< endl; // check correctness checkCuda(hipMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0); hipFree(dVals); hipFree(dU0); hipFree(dU1); hipFree(dU2); hipFree(dU3); hipFree(dInds2); hipFree(dInds3); hipFree(dfbrIdx0); hipFree(dfbrIdx1); hipFree(dFbrIdx2); hipFree(dfbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrPtr2); return 0; } int MTTKRP_TILED_COO_GPU(TiledTensor *TiledX, Matrix *U, const Options Opt){ //allocate and memcpy GPU memory //Tensor ITYPE mode = Opt.mode; ITYPE R = Opt.R; ITYPE *dInds0, *dInds1, *dInds2; ITYPE dLoc = 0, totNnz = 0; DTYPE *dVals; // All tile same mode ITYPE mode0 = TiledX[0].modeOrder[0]; ITYPE mode1 = TiledX[0].modeOrder[1]; ITYPE mode2 = TiledX[0].modeOrder[2]; for (int tile = 0; tile < Opt.nTile; ++tile) totNnz += TiledX[tile].totNnz; checkCuda(hipMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dInds0, totNnz * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dInds1, totNnz * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0); for (int tile = 0; tile < Opt.nTile; ++tile){ if(tile > 0) dLoc += TiledX[tile-1].totNnz; checkCuda(hipMemcpy(dVals + dLoc, &(TiledX[tile].vals[0]), TiledX[tile].totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dInds0 + dLoc, &(TiledX[tile].inds[mode0][0]), TiledX[tile].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dInds1 + dLoc, &(TiledX[tile].inds[mode1][0]), TiledX[tile].totNnz * sizeof(ITYPE) ,hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[tile].inds[mode2][0]), TiledX[tile].totNnz * 
sizeof(ITYPE),hipMemcpyHostToDevice), 0); } // //Matrices DTYPE *dU0, *dU1, *dU2; checkCuda(hipMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0); hipMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(hipMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); // BLOCK and GRID int BLOCKSIZE = 128; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float mili = 0, GPUTime = 0; // CUDA call dLoc = 0; for (int tile = 0; tile < Opt.nTile; ++tile){ if(tile > 0) dLoc += TiledX[tile-1].totNnz; cout << "Tile " << tile << " launched.. "<<endl; grid.x = (32 * TiledX[tile].totNnz + BLOCKSIZE - 1) / BLOCKSIZE; checkCuda(hipEventRecord(start), __LINE__); hipLaunchKernelGGL(( mttkrp_COO_kernel), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dInds0 + dLoc, dInds1 + dLoc, dInds2 + dLoc, TiledX[tile].totNnz, dU0, dU1, dU2, mode, R); checkCuda(hipEventRecord(stop), __LINE__); hipEventSynchronize(stop); checkCuda(hipEventElapsedTime(&mili, start, stop), __LINE__); hipDeviceSynchronize(); cout << "Tile: " << tile << " - time " << mili << "ms"<< endl; GPUTime += mili; } cout << "COO GPU - time " << GPUTime << "ms"<< endl; // check correctness checkCuda(hipMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0); hipFree(dVals); hipFree(dU0); hipFree(dU1); hipFree(dU2); hipFree(dInds0); hipFree(dInds1); hipFree(dInds2); return 0; } int MTTKRP_B_HCSR_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt){ /*choosing kernel type: false: B-CSF- IPDPS work, true: parallelism at fiber level, call slc_atomic_fbrlblpar function*/ bool slcAtomicFbrLvlPar = false; /* Allocate and memcpy GPU memory */ //Tensor ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin, *dFbrLikeSlcInds; DTYPE *dVals; ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0; ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0; // // All tile same mode ITYPE mode0 = TiledX[0].modeOrder[0]; ITYPE mode1 = TiledX[0].modeOrder[1]; ITYPE mode2 = TiledX[0].modeOrder[2]; ITYPE mode3 =((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ; for (int tile = 0; tile < Opt.nTile; ++tile){ totNnz += TiledX[tile].totNnz; totSlcPtr += TiledX[tile].fbrPtr[0].size() ; totSlcIdx += TiledX[tile].fbrIdx[0].size() ; totFbrPtr += TiledX[tile].fbrPtr[1].size() ; totFbrIdx += TiledX[tile].fbrIdx[1].size() ; totFbrPtr2 += ((TiledX[tile].ndims == 4) ? 
TiledX[tile].fbrPtr[2].size() : 0) ; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float memcpyTime = 0; cuda_timer_start(start); checkCuda(hipMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 3) checkCuda(hipMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 4){ checkCuda(hipMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0); } /* cuda memcopy for tiled parts*/ for (int tile = 0; tile < Opt.nTile; ++tile){ if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); // all tile same dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[tile].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ; } checkCuda(hipMemcpy(dVals + dLoc, &(TiledX[tile].vals[0]), TiledX[tile].totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[tile].fbrPtr[0][0]), TiledX[tile].fbrPtr[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[tile].fbrIdx[0][0]), TiledX[tile].fbrIdx[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[tile].fbrPtr[1][0]), TiledX[tile].fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[tile].fbrIdx[1][0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); if(slcAtomicFbrLvlPar) checkCuda(hipMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[tile].fbrLikeSlcInds[0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); if(TiledX[tile].ndims == 3) checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[tile].inds[TiledX[tile].modeOrder[2]][0]), TiledX[tile].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); if(TiledX[tile].ndims == 4){ checkCuda(hipMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[tile].fbrPtr[2][0]), TiledX[tile].fbrPtr[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[tile].fbrIdx[2][0]), TiledX[tile].fbrIdx[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dInds3 + dLoc, &(TiledX[tile].inds[mode3][0]), TiledX[tile].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); } dBinLoc = 0; for (int bin = 0; bin < Opt.nBin; ++bin){ if(bin > 0) dBinLoc += TiledX[tile].slcMapperBin[bin-1].size(); checkCuda(hipMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(TiledX[tile].slcMapperBin[bin][0]), TiledX[tile].slcMapperBin[bin].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); } } cuda_timer_stop(start, stop, memcpyTime); cout << "Memcopy time " << memcpyTime << endl; // //Matrices DTYPE *dU0, *dU1, *dU2, *dU3; checkCuda(hipMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0); 
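    /* Dense factor matrices on the device: dU0 is the factor being computed for this
       MTTKRP mode and is zeroed with hipMemset below, while dU1/dU2 (and dU3 for 4-D
       tensors) are inputs copied from the host. Each buffer holds nRows * nCols DTYPEs
       in row-major order, so element (i, r) of a factor lives at dU[i * R + r]. */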
checkCuda(hipMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0); hipMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(hipMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); if(TiledX[0].ndims == 4){ checkCuda(hipMalloc((void**) &dU3, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)), 0); checkCuda(hipMemcpy(dU3, &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); } // BLOCK and GRID int BLOCKSIZE = 512; unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32); if(Opt.warpPerSlice * 32 > BLOCKSIZE){ cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl; exit(0); } hipStream_t streams[Opt.nBin]; float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0; int smallBinEndsAt = 5; /* Warp per slice and threadblock per size */ int *warpPerSlc = new int[Opt.nBin]; int *logOfWarpPerSlc = new int[Opt.nBin]; int *TbPerSlc = new int[Opt.nBin]; int *logOfTbPerSlc = new int[Opt.nBin]; for (int bin = 0; bin < Opt.nBin ; ++bin){ TbPerSlc[bin] = 1; warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1); if(warpPerSlc[bin] > 16) warpPerSlc[bin] = 16; logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]); TbPerSlc[bin] = 1; logOfTbPerSlc[bin] = 0; if (bin >= smallBinEndsAt){ TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // 1st big bin starts with 1 TB 1 << 1 not 1 << 5 if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32; logOfTbPerSlc[bin] = log2(TbPerSlc[bin]); warpPerSlc[bin] = 16; logOfWarpPerSlc[bin] = 4; } } // TBD: change warpPerSlc to warpPerSlc[bin] and all int slcPerTb = 1; dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0; for (int bin = 0; bin < Opt.nBin; ++bin) hipStreamCreate(&streams[bin]); /*MTTKRP on Opt.mode*/ int MTTKRPmode = mode0;//Opt.mode; for (int tile = 0; tile < Opt.nTile; ++tile){ dBinLoc = 0; if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ; } BLOCKSIZE = (( slcAtomicFbrLvlPar == true) ? 
Opt.TBsize : 512) ; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int smallBinEndsAt = 5; int slcPerTb = 0; // int warpPerFbr = BLOCKSIZE/32;//1;//Opt.warpPerSlice;//4;//; // int logOfWarpPerFbr = log2(warpPerFbr); // int bin = 0; // int fbrPerWarp = 1;//BLOCKSIZE/32; // dont overflow TB // int logOfFbrPerWarp = log2(fbrPerWarp); int warpPerFbr =Opt.warpPerSlice;//4;//; BLOCKSIZE/32;//1;// int logOfWarpPerFbr = log2(warpPerFbr); int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB int logOfFbrPerWarp = log2(fbrPerWarp ); grid.x = ( warpPerFbr * 32 * ((TiledX[tile].nFibers+fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE; double t0 = seconds(); cuda_timer_start(start); if(slcAtomicFbrLvlPar){ if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers, dU0, dU1, dU2, Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); else hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].nFibers, dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); } else{ for (int bin = 0; bin < Opt.nBin ; ++bin){ if(bin < smallBinEndsAt){ ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin]; dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0); grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_HCSR_kernel_smllBin), dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(), dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); else hipLaunchKernelGGL(( mttkrp_HCSR_kernel_smllBin_4D), dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } // Processing heavy bin.. 
multiple TB per slice else{ dBinLoc += TiledX[tile].slcMapperBin[bin-1].size(); grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_HCSR_kernel_hvyBin), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(), dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); else hipLaunchKernelGGL(( mttkrp_HCSR_kernel_hvyBin_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } } } cuda_timer_stop(start, stop, mili); CPUtimer += seconds() - t0; GPUTime += mili; if(Opt.verbose){ cout << "Tile: " << tile << " - time: " << mili << "ms"; cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: " << TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " "; cout << endl; } } allModeGPUTime += GPUTime; cout << "B-CSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl; for (int bin = 0; bin < Opt.nBin; ++bin) hipStreamDestroy(streams[bin]); // check correctness checkCuda(hipMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0); hipFree(dVals); hipFree(dU0); hipFree(dU1); hipFree(dU2); hipFree(dU3); hipFree(dfbrIdx0); hipFree(dInds2); hipFree(dInds3); hipFree(dfbrIdx0); hipFree(dfbrIdx1); hipFree(dFbrIdx2); hipFree(dfbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrPtr2); hipFree(dFbrLikeSlcInds); return 0; } int MTTKRP_B_HCSR_GPU_ANYMODE(TiledTensor *TiledX, Matrix *U, const Options &Opt, int mode){ /* Allocate and memcpy GPU memory */ //Tensor ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin, *dFbrLikeSlcInds; DTYPE *dVals; ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0; ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0; // // All tile same mode ITYPE mode0 = 0;//TiledX[0].modeOrder[0]; ITYPE mode1 = 1;//TiledX[0].modeOrder[1]; ITYPE mode2 = 2;//TiledX[0].modeOrder[2]; ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ; for (int tile = 0; tile < Opt.nTile; ++tile){ totNnz += TiledX[tile].totNnz; totSlcPtr += TiledX[tile].fbrPtr[0].size() ; totSlcIdx += TiledX[tile].fbrIdx[0].size() ; totFbrPtr += TiledX[tile].fbrPtr[1].size() ; totFbrIdx += TiledX[tile].fbrIdx[1].size() ; totFbrPtr2 += ((TiledX[tile].ndims == 4) ? 
TiledX[tile].fbrPtr[2].size() : 0) ; } double t0 = seconds(); checkCuda(hipMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 3) checkCuda(hipMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 4){ checkCuda(hipMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0); } /* cuda memcopy for tiled parts*/ for (int tile = 0; tile < Opt.nTile; ++tile){ if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); // all tile same dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[tile].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ; } checkCuda(hipMemcpy(dVals + dLoc, &(TiledX[tile].vals[0]), TiledX[tile].totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[tile].fbrPtr[0][0]), TiledX[tile].fbrPtr[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[tile].fbrIdx[0][0]), TiledX[tile].fbrIdx[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[tile].fbrPtr[1][0]), TiledX[tile].fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[tile].fbrIdx[1][0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); if(TiledX[tile].ndims == 3) checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[tile].inds[TiledX[tile].modeOrder[2]][0]), TiledX[tile].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); if(TiledX[tile].ndims == 4){ checkCuda(hipMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[tile].fbrPtr[2][0]), TiledX[tile].fbrPtr[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[tile].fbrIdx[2][0]), TiledX[tile].fbrIdx[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dInds3 + dLoc, &(TiledX[tile].inds[TiledX[tile].modeOrder[3]][0]), TiledX[tile].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); } dBinLoc = 0; for (int bin = 0; bin < Opt.nBin; ++bin){ if(bin > 0) dBinLoc += TiledX[tile].slcMapperBin[bin-1].size(); checkCuda(hipMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(TiledX[tile].slcMapperBin[bin][0]), TiledX[tile].slcMapperBin[bin].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); } } t0 = seconds(); unsigned int *dULoc = new unsigned int[TiledX[0].ndims]; unsigned int *szDU = new unsigned int[TiledX[0].ndims]; // //Matrices DTYPE *dU;// *dU0, *dU1, *dU2, *dU3; ITYPE mtxSize = ((TiledX[0].ndims == 3) ? 
(U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols : (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols ); checkCuda(hipMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), 0); for (int m = 0; m < TiledX[0].ndims; ++m) szDU[m] = U[m].nRows * U[m].nCols; ITYPE mtxLoc = 0; for (int m = 0; m < mode; ++m) mtxLoc += szDU[m]; checkCuda(hipMemcpy(dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); if(TiledX[0].ndims == 4) checkCuda(hipMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); hipMemset(dU + mtxLoc, 0, U[mode].nRows * U[mode0].nCols * sizeof(DTYPE)); // BLOCK and GRID int BLOCKSIZE = 512; unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32); if(Opt.warpPerSlice * 32 > BLOCKSIZE){ cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl; exit(0); } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipStream_t streams[Opt.nBin]; float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0; int smallBinEndsAt = 5; /* Warp per slice and threadblock per size */ int *warpPerSlc = new int[Opt.nBin]; int *logOfWarpPerSlc = new int[Opt.nBin]; int *TbPerSlc = new int[Opt.nBin]; int *logOfTbPerSlc = new int[Opt.nBin]; for (int bin = 0; bin < Opt.nBin ; ++bin){ TbPerSlc[bin] = 1; warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1); if(warpPerSlc[bin] > 16) warpPerSlc[bin] = 16; logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]); TbPerSlc[bin] = 1; logOfTbPerSlc[bin] = 0; if (bin >= smallBinEndsAt){ TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // 1st big bin starts with 1 TB 1 << 1 not 1 << 5 if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32; logOfTbPerSlc[bin] = log2(TbPerSlc[bin]); warpPerSlc[bin] = 16; logOfWarpPerSlc[bin] = 4; } } // TBD: change warpPerSlc to warpPerSlc[bin] and all int slcPerTb = 1; dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0; for (int bin = 0; bin < Opt.nBin; ++bin) hipStreamCreate(&streams[bin]); /*MTTKRP on Opt.mode*/ int MTTKRPmode = mode;//Opt.mode; for (int tile = 0; tile < Opt.nTile; ++tile){ /* matrix order according to mode order*/ for (int mm = 0; mm < TiledX[0].ndims; ++mm){ int curMode = TiledX[tile].modeOrder[mm]; dULoc[mm] = 0; for (int q = 0; q < curMode; ++q) dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0 } dBinLoc = 0; if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ; } // BLOCKSIZE = (( slcAtomicFbrLvlPar == true) ? 
Opt.TBsize : 512) ; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int smallBinEndsAt = 5; int slcPerTb = 0; // int warpPerFbr = BLOCKSIZE/32;//1;//Opt.warpPerSlice;//4;//; // int logOfWarpPerFbr = log2(warpPerFbr); // int bin = 0; // int fbrPerWarp = 1;//BLOCKSIZE/32; // dont overflow TB // int logOfFbrPerWarp = log2(fbrPerWarp); int warpPerFbr =Opt.warpPerSlice;//4;//; BLOCKSIZE/32;//1;// int logOfWarpPerFbr = log2(warpPerFbr); int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB int logOfFbrPerWarp = log2(fbrPerWarp ); double t0 = seconds(); cuda_timer_start(start); if(mode == TiledX[0].modeOrder[0]){ for (int bin = 0; bin < Opt.nBin ; ++bin){ if(bin < smallBinEndsAt){ ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin]; dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0); grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_HCSR_kernel_smllBin), dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); else hipLaunchKernelGGL(( mttkrp_HCSR_kernel_smllBin_4D), dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } // Processing heavy bin.. multiple TB per slice else{ dBinLoc += TiledX[tile].slcMapperBin[bin-1].size(); grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_HCSR_kernel_hvyBin), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); else hipLaunchKernelGGL(( mttkrp_HCSR_kernel_hvyBin_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } } } else if(TiledX[0].ndims == 4 && TiledX[0].modeOrder[1] == MTTKRPmode && TiledX[0].totNnz){ for (int bin = 0; bin < Opt.nBin ; ++bin){ if(bin < smallBinEndsAt){ ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin]; dBinLoc += ((bin > 0) ? 
TiledX[tile].slcMapperBin[bin-1].size() : 0); grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_smllBin_fbrS_atomic_4D), dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } // Processing heavy bin.. multiple TB per slice else{ dBinLoc += TiledX[tile].slcMapperBin[bin-1].size(); grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_hvyBin_fbrS_atomic_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } } } else if(mode == TiledX[0].modeOrder[TiledX[0].ndims-2]){ for (int bin = 0; bin < Opt.nBin ; ++bin){ if(bin < smallBinEndsAt){ ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin]; dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0); grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_smllBin_fbr_atomic), dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); else hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_smllBin_fbr_atomic_4D), dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } // Processing heavy bin.. 
multiple TB per slice else{ dBinLoc += TiledX[tile].slcMapperBin[bin-1].size(); grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_hvyBin_fbr_atomic), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); else hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_hvyBin_fbr_atomic_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } } } else if(mode == TiledX[0].modeOrder[TiledX[0].ndims-1]){ for (int bin = 0; bin < Opt.nBin ; ++bin){ if(bin < smallBinEndsAt){ ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin]; dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0); grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_smllBin_all_atomic) , dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); else hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_smllBin_all_atomic_4D), dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } // Processing heavy bin.. 
multiple TB per slice else{ dBinLoc += TiledX[tile].slcMapperBin[bin-1].size(); grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_hvyBin_all_atomic), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); else hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_hvyBin_all_atomic_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } } } cuda_timer_stop(start, stop, mili); CPUtimer += seconds() - t0; GPUTime += mili; // if(Opt.verbose) { cout << "Tile: " << tile << " - time: " << mili << "ms"; if(TiledX[0].ndims == 3){ cout << " nSlc: " << TiledX[tile].fbrIdx[0].size() << ", nFibers: " << TiledX[tile].fbrPtr[1].size() <<", nnz: " << TiledX[tile].totNnz; cout << endl; } else if(TiledX[0].ndims == 4){ cout << " nSlc: " << TiledX[tile].fbrIdx[0].size() << ", nSFibers: " << TiledX[tile].fbrPtr[1].size() << ", nFibers: " << TiledX[tile].fbrPtr[2].size() <<", nnz: " << TiledX[tile].totNnz; cout << endl; } } } allModeGPUTime += GPUTime; cout << "ONE-B-CSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl; for (int bin = 0; bin < Opt.nBin; ++bin) hipStreamDestroy(streams[bin]); // check correctness checkCuda(hipMemcpy(&U[mode].vals[0], dU + mtxLoc, U[mode].nRows * U[mode].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0); hipFree(dVals); hipFree(dU); //hipFree(dU1); hipFree(dU2); hipFree(dU3); hipFree(dfbrIdx0); hipFree(dInds2); hipFree(dInds3); hipFree(dfbrIdx0); hipFree(dfbrIdx1); hipFree(dFbrIdx2); hipFree(dfbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrPtr2); hipFree(dFbrLikeSlcInds); return 0; } int MTTKRP_HYB_GPU(const HYBTensor &HybX, Matrix *U, const Options &Opt){ //allocate and memcpy GPU memory //Tensor ITYPE *dCOOInds0, *dCOOInds1, *dCOOInds2, *dCOOInds3; ITYPE *dCSLSlcPtr, *dCSLSlcInds, *dCSLInds1, *dCSLInds2, *dCSLSlcMapperBin; ITYPE *dfbrPtr0, *dfbrIdx0, *dInds2, *dInds3, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin; DTYPE *dVals, *dCOOVals, *dCSLVals; ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dCSLBinLoc = 0, dFbrLoc2 =0; int warpPerSlice = Opt.warpPerSlice; int logOfWarpPerSlice = log2(Opt.warpPerSlice); int TbPerSlc = 1; int logOfTPS = log2(TbPerSlc); // All tile same mode ITYPE mode0 = HybX.modeOrder[0]; ITYPE mode1 = HybX.modeOrder[1]; ITYPE mode2 = HybX.modeOrder[2]; ITYPE mode3 =((HybX.ndims == 4) ? 
HybX.modeOrder[3] : 0) ; // ****** mem op HYB COO ******* if(HybX.COOnnz > 0){ checkCuda(hipMalloc((void**) &dCOOVals, HybX.COOnnz * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dCOOInds0, HybX.COOnnz * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dCOOInds1, HybX.COOnnz * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dCOOInds2, HybX.COOnnz * sizeof(ITYPE)), 0); checkCuda(hipMemcpy(dCOOVals, &(HybX.COOvals[0]), HybX.COOnnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dCOOInds0, &(HybX.COOinds[mode0][0]), HybX.COOnnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dCOOInds1, &(HybX.COOinds[mode1][0]), HybX.COOnnz * sizeof(ITYPE) ,hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dCOOInds2, &(HybX.COOinds[mode2][0]), HybX.COOnnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); if(HybX.ndims == 4){ checkCuda(hipMalloc((void**) &dCOOInds3, HybX.COOnnz * sizeof(ITYPE)), 0); checkCuda(hipMemcpy(dCOOInds3, &(HybX.COOinds[mode3][0]), HybX.COOnnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); } } // ****** mem op HYB CSL ******* if(HybX.CSLnnz > 0){ checkCuda(hipMalloc((void**) &dCSLVals, HybX.CSLnnz * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dCSLSlcPtr, HybX.CSLslicePtr.size() * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dCSLSlcInds, HybX.CSLsliceIdx.size() * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dCSLInds1, HybX.CSLnnz * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dCSLInds2, HybX.CSLnnz * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dCSLSlcMapperBin, HybX.CSLslicePtr.size() * sizeof(ITYPE)), 0); checkCuda(hipMemcpy(dCSLVals, &(HybX.CSLvals[0]), HybX.CSLnnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dCSLSlcPtr + dSlcLoc, &(HybX.CSLslicePtr[0]), HybX.CSLslicePtr.size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dCSLSlcInds + dSlcIdxLoc, &(HybX.CSLsliceIdx[0]), HybX.CSLsliceIdx.size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dCSLInds1, &(HybX.CSLinds[mode1][0]), HybX.CSLnnz * sizeof(ITYPE) ,hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dCSLInds2, &(HybX.CSLinds[mode2][0]), HybX.CSLnnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); dCSLBinLoc = 0; for (int bin = 0; bin < Opt.nBin; ++bin){ if(bin > 0) dCSLBinLoc += HybX.CSLslcMapperBin[bin-1].size(); if(HybX.CSLslcMapperBin[bin].size() > 0) checkCuda(hipMemcpy(dCSLSlcMapperBin + dSlcIdxLoc + dCSLBinLoc, &(HybX.CSLslcMapperBin[bin][0]), HybX.CSLslcMapperBin[bin].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); } } // ****** mem op HYB HCSR ******* if(HybX.HCSRnnz > 0){ checkCuda(hipMalloc((void**) &dVals, HybX.HCSRnnz * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dfbrPtr0, HybX.fbrPtr[0].size() * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrIdx0, HybX.fbrIdx[0].size() * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dSlcMapperBin, HybX.fbrPtr[0].size() * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrPtr1, HybX.fbrPtr[1].size() * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrIdx1, HybX.fbrPtr[1].size() * sizeof(ITYPE)), 0); checkCuda(hipMemcpy(dVals, &(HybX.vals[0]), HybX.HCSRnnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrPtr0, &(HybX.fbrPtr[0][0]), HybX.fbrPtr[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrIdx0, &(HybX.fbrIdx[0][0]), HybX.fbrIdx[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrPtr1, &(HybX.fbrPtr[1][0]), HybX.fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); 
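		/* The copy below sizes dfbrIdx1 with HybX.fbrPtr[1].size(); this assumes the
		   fbrIdx[1] and fbrPtr[1] arrays of the HCSR part of the HYB tensor always hold
		   the same number of entries, which matches how dfbrIdx1 was allocated above. */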
checkCuda(hipMemcpy(dfbrIdx1, &(HybX.fbrIdx[1][0]), HybX.fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); if(HybX.ndims == 3){ checkCuda(hipMalloc((void**) &dInds2, HybX.HCSRnnz * sizeof(ITYPE)), 0); checkCuda(hipMemcpy(dInds2, &(HybX.inds[mode2][0]), HybX.HCSRnnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); } if(HybX.ndims == 4){ checkCuda(hipMalloc((void**) &dFbrIdx2, HybX.fbrIdx[2].size() * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dFbrPtr2, HybX.fbrPtr[2].size() * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dInds3, HybX.HCSRnnz * sizeof(ITYPE)), 0); checkCuda(hipMemcpy(dFbrPtr2, &(HybX.fbrPtr[2][0]), HybX.fbrPtr[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dFbrIdx2, &(HybX.fbrIdx[2][0]), HybX.fbrIdx[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dInds3, &(HybX.inds[mode3][0]), HybX.HCSRnnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); } dBinLoc = 0; for (int bin = 0; bin < Opt.nBin; ++bin){ if(bin > 0) dBinLoc += HybX.slcMapperBin[bin-1].size(); if(HybX.slcMapperBin[bin].size() > 0) checkCuda(hipMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(HybX.slcMapperBin[bin][0]), HybX.slcMapperBin[bin].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); } } // //Matrices DTYPE *dU0, *dU1, *dU2, *dU3; checkCuda(hipMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0); hipMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(hipMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); if(HybX.ndims == 4){ checkCuda(hipMalloc((void**) &dU3, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)), 0); checkCuda(hipMemcpy(dU3, &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); } // BLOCK and GRID int BLOCKSIZE = 512; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32); if(Opt.warpPerSlice * 32 > BLOCKSIZE){ cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." 
<< endl; exit(0); } hipEvent_t start, stop, HYBstart, HYBstop; hipEventCreate(&start); hipEventCreate(&stop); hipEventCreate(&HYBstart); hipEventCreate(&HYBstop); hipStream_t streams[2 * Opt.nBin + 1]; for (int bin = 0; bin < 2 * Opt.nBin + 1; ++bin) hipStreamCreate(&streams[bin]); float mili = 0, HYBmili =0, GPUTime = 0, CPUtimer = 0, HYBTime = 0; dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0; bool useLoop = false; if(useLoop) grid.x = 32768*2; // mili = 0; dCSLBinLoc = 0; dBinLoc = 0; int smallBinEndsAt = 5; int slcPerTb = 0; cuda_timer_start(HYBstart); // ******* CUDA COO ******* // if(HybX.COOnnz > 0){ // BLOCKSIZE = 128; // block.x = BLOCKSIZE; // // /* Like PARTI loop */ = // if(!useLoop) // grid.x = (32 * HybX.COOnnz + BLOCKSIZE - 1) / BLOCKSIZE; // if(Opt.verbose) // cuda_timer_start(start); // if(!useLoop){ // if(HybX.ndims == 3) // hipLaunchKernelGGL(( mttkrp_HYB_COO_kernel), dim3(grid), dim3(block), 0, 0, dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2, HybX.COOnnz, dU0, dU1, dU2, Opt.mode, Opt.R); // else if (HybX.ndims == 4) // hipLaunchKernelGGL(( mttkrp_HYB_COO_kernel_4D), dim3(grid), dim3(block), 0, 0, dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2,dCOOInds3, HybX.COOnnz, dU0, dU1, dU2, dU3, Opt.mode, Opt.R); // } // else{ // if(HybX.ndims == 3) // mttkrp_HYB_COO_kernel_loop<<<grid, block, 0, 0>>>(dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2, HybX.COOnnz, dU0, dU1, dU2, Opt.mode, Opt.R); // else if (HybX.ndims == 4) // mttkrp_HYB_COO_kernel_4D_loop<<<grid, block, 0, 0>>>(dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2,dCOOInds3, HybX.COOnnz, dU0, dU1, dU2, dU3, Opt.mode, Opt.R); // } // if(Opt.verbose){ // cuda_timer_stop(start, stop, mili); // HYBTime += mili; // cout << "HYB-COO GPU " << mili << "ms"<< endl; // } // } // ******* CUDA CSL ******* // if(HybX.CSLnnz > 0 || HybX.HCSRnnz > 0) { if(HybX.COOnnz > 0){ BLOCKSIZE = 128; block.x = 128; grid.x = (32 * HybX.COOnnz + BLOCKSIZE - 1) / BLOCKSIZE; if(HybX.ndims == 3) hipLaunchKernelGGL(( mttkrp_HYB_COO_kernel), dim3(grid), dim3(block), 0, 0, dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2, HybX.COOnnz, dU0, dU1, dU2, Opt.mode, Opt.R); else if (HybX.ndims == 4) hipLaunchKernelGGL(( mttkrp_HYB_COO_kernel_4D), dim3(grid), dim3(block), 0, 0, dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2,dCOOInds3, HybX.COOnnz, dU0, dU1, dU2, dU3, Opt.mode, Opt.R); } BLOCKSIZE = 512; block.x = BLOCKSIZE; for (int bin = 0; bin < Opt.nBin ; ++bin){ dBinLoc += ((bin > 0) ? HybX.slcMapperBin[bin-1].size() : 0); dCSLBinLoc += ((bin > 0) ? HybX.CSLslcMapperBin[bin-1].size() : 0); if( HybX.slcMapperBin[bin].size() == 0 && HybX.CSLslcMapperBin[bin].size() == 0) continue; // Processing small bin.. merged to one. 
1 WARP slice if(bin < smallBinEndsAt){ warpPerSlice = 1; logOfWarpPerSlice = 0;//log2(warpPerSlice); slcPerTb = 16 / warpPerSlice; /* CSL small bin */ if(HybX.CSLnnz > 0){ grid.x = ( warpPerSlice * 32 * HybX.CSLslcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; hipLaunchKernelGGL(( mttkrp_CSL_kernel_bin), dim3(grid), dim3(block), 0, streams[1], dCSLVals, dCSLSlcInds, dCSLSlcMapperBin + dCSLBinLoc, dCSLInds2, dCSLSlcPtr, dCSLInds1, HybX.CSLslcMapperBin[bin].size(), dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice); } /* HCSR small bin */ if(HybX.HCSRnnz > 0){ grid.x = ( warpPerSlice * 32 * HybX.slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(HybX.ndims == 3) hipLaunchKernelGGL(( mttkrp_HCSR_kernel_smllBin), dim3(grid), dim3(block), 0, streams[2], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, HybX.slcMapperBin[bin].size(), dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS); else if(HybX.ndims == 4) hipLaunchKernelGGL(( mttkrp_HCSR_kernel_smllBin_4D), dim3(grid), dim3(block), 0, streams[2], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, HybX.slcMapperBin[bin].size(), dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS); } } // Processing heavy bin.. multiple TB per slice else{ TbPerSlc = 1 << (bin - smallBinEndsAt + 1); // 1st big bin starts with 1 TB 1 << 1 not 1 << 5 if(TbPerSlc > 32) TbPerSlc = 32; logOfTPS = log2(TbPerSlc); warpPerSlice = 16; logOfWarpPerSlice = 4; /* CSL big bin */ if(HybX.CSLnnz > 0){ grid.x = (TbPerSlc * warpPerSlice * 32 * HybX.CSLslcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; hipLaunchKernelGGL(( mttkrp_CSL_kernel_hvyBin), dim3(grid), dim3(block), 0, streams[bin+1], dCSLVals + dLoc, dCSLSlcInds + dSlcIdxLoc, dCSLSlcMapperBin + dSlcIdxLoc + dCSLBinLoc, dCSLInds2 + dLoc, dCSLSlcPtr + dSlcLoc, dCSLInds1, HybX.CSLslcMapperBin[bin].size(), dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS); } /* HCSR big bin */ if(HybX.HCSRnnz > 0){ grid.x = (TbPerSlc * warpPerSlice * 32 * HybX.slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(HybX.ndims == 3) hipLaunchKernelGGL(( mttkrp_HCSR_kernel_hvyBin), dim3(grid), dim3(block), 0, streams[bin+2], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, HybX.slcMapperBin[bin].size(), dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS); else if(HybX.ndims == 4) hipLaunchKernelGGL(( mttkrp_HCSR_kernel_hvyBin_4D), dim3(grid), dim3(block), 0, streams[bin + 2], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, HybX.slcMapperBin[bin].size(), dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS); } } } // if(Opt.verbose){ // cuda_timer_stop(start, stop, mili); // HYBTime += mili; // cout << "CSL+HCSR GPU-time: " << mili << "ms"<< endl; // } } cuda_timer_stop(HYBstart, HYBstop, HYBmili); if(Opt.verbose) cout << "verbose on. 
HYB GPU: " << HYBmili << endl; else cout << "HYB GPU: " << HYBmili << endl; for (int bin = 0; bin < 2 * Opt.nBin + 1; ++bin) hipStreamDestroy(streams[bin]); // check correctness checkCuda(hipMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0); hipFree(dVals); hipFree(dCOOVals); hipFree(dCSLVals); hipFree(dU0); hipFree(dU1); hipFree(dU2); hipFree(dfbrIdx0); hipFree(dInds2); hipFree(dInds3); hipFree(dfbrIdx0); hipFree(dfbrIdx1); hipFree(dFbrIdx2); hipFree(dfbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrPtr2); hipFree(dCSLInds1); hipFree(dCSLInds2); hipFree(dCSLSlcPtr); hipFree(dCSLSlcInds); hipFree(dCOOInds0); hipFree(dCOOInds1); hipFree(dCOOInds2); return 0; } int MTTKRP_ONE_HCSR_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt){ bool performMTTKRPMode = true, performMTTKRPnMode = true, performMTTKRPnnMode = true; /* Allocate and memcpy GPU memory */ //Tensor ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin, *dFbrLikeSlcInds; DTYPE *dVals; ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0, dFbrLikeSlcIndsLoc = 0; ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0; // // All tile same mode ITYPE mode0 = 0;//TiledX[0].modeOrder[0]; ITYPE mode1 = 1;//TiledX[0].modeOrder[1]; ITYPE mode2 = 2;//TiledX[0].modeOrder[2]; ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ; ITYPE R = Opt.R; for (int tile = 0; tile < Opt.nTile; ++tile){ totNnz += TiledX[tile].totNnz; totSlcPtr += TiledX[tile].fbrPtr[0].size() ; totSlcIdx += TiledX[tile].fbrIdx[0].size() ; totFbrPtr += TiledX[tile].fbrPtr[1].size() ; totFbrIdx += TiledX[tile].fbrIdx[1].size() ; totFbrPtr2 += ((TiledX[tile].ndims == 4) ? TiledX[tile].fbrPtr[2].size() : 0) ; } double t0 = seconds(); checkCuda(hipMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 3) checkCuda(hipMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 4){ checkCuda(hipMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0); } /* cuda memcopy for tiled parts*/ for (int tile = 0; tile < Opt.nTile; ++tile){ if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); // all tile same dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[tile].ndims == 4) ? 
TiledX[tile - 1].fbrPtr[2].size() : 0) ; } checkCuda(hipMemcpy(dVals + dLoc, &(TiledX[tile].vals[0]), TiledX[tile].totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[tile].fbrPtr[0][0]), TiledX[tile].fbrPtr[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[tile].fbrIdx[0][0]), TiledX[tile].fbrIdx[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[tile].fbrPtr[1][0]), TiledX[tile].fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[tile].fbrIdx[1][0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[tile].fbrLikeSlcInds[0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); if(TiledX[tile].ndims == 3) checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[tile].inds[TiledX[tile].modeOrder[2]][0]), TiledX[tile].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); if(TiledX[tile].ndims == 4){ checkCuda(hipMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[tile].fbrPtr[2][0]), TiledX[tile].fbrPtr[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[tile].fbrIdx[2][0]), TiledX[tile].fbrIdx[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dInds3 + dLoc, &(TiledX[tile].inds[TiledX[0].modeOrder[3]][0]), TiledX[tile].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); } dBinLoc = 0; for (int bin = 0; bin < Opt.nBin; ++bin){ if(bin > 0) dBinLoc += TiledX[tile].slcMapperBin[bin-1].size(); checkCuda(hipMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(TiledX[tile].slcMapperBin[bin][0]), TiledX[tile].slcMapperBin[bin].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); } } float tnsMemcpyTime = seconds() - t0; t0 = seconds(); unsigned int *dULoc = new unsigned int[TiledX[0].ndims]; unsigned int *szDU = new unsigned int[TiledX[0].ndims]; // //Matrices DTYPE *dU;// *dU0, *dU1, *dU2, *dU3; ITYPE mtxSize = ((TiledX[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols : (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols ); checkCuda(hipMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), 0); for (int m = 0; m < TiledX[0].ndims; ++m) szDU[m] = U[m].nRows * U[m].nCols; hipMemset(dU+0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(hipMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); float mtxMemcpyTime = seconds() - t0; // cout << "tns and mtx memcopy time: " << tnsMemcpyTime <<", " << mtxMemcpyTime<< endl; if(TiledX[0].ndims == 4) checkCuda(hipMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); // BLOCK and GRID int BLOCKSIZE = 512; unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32); // if(Opt.warpPerSlice * 32 > BLOCKSIZE){ // cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." 
<< endl; // exit(0); // } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipStream_t streams[Opt.nBin]; float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0; int smallBinEndsAt = 5; /* Warp per slice and threadblock per size */ int *warpPerSlc = new int[Opt.nBin]; int *logOfWarpPerSlc = new int[Opt.nBin]; int *TbPerSlc = new int[Opt.nBin]; int *logOfTbPerSlc = new int[Opt.nBin]; for (int bin = 0; bin < Opt.nBin ; ++bin){ TbPerSlc[bin] = 1; warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1); if(warpPerSlc[bin] > 16) warpPerSlc[bin] = 16; logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]); TbPerSlc[bin] = 1; logOfTbPerSlc[bin] = 0; if (bin >= smallBinEndsAt){ TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // 1st big bin starts with 1 TB 1 << 1 not 1 << 5 if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32; logOfTbPerSlc[bin] = log2(TbPerSlc[bin]); warpPerSlc[bin] = 16; logOfWarpPerSlc[bin] = 4; } } // TBD: change warpPerSlc to warpPerSlc[bin] and all int slcPerTb = 1; dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0; for (int bin = 0; bin < Opt.nBin; ++bin) hipStreamCreate(&streams[bin]); /*MTTKRP on Opt.mode*/ unsigned int dU0Loc, dU1Loc, dU2Loc , dU3Loc; /* matrix order according to mode order*/ for (int m = 0; m < TiledX[0].ndims; ++m){ int curMode = TiledX[0].modeOrder[m]; dULoc[m] = 0; for (int q = 0; q < curMode; ++q){ dULoc[m] += szDU[q % TiledX[0].ndims]; //1 2 3 0 } } for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode){ if(MTTKRPmode > 0){ mili = 0; GPUTime = 0; CPUtimer = 0; dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0; // MTTKRP on mode mode 0 changed DU0. To pass correctness for now initializing to 2 again. int mode = MTTKRPmode - 1; for(long r = 0; r < U[mode].nRows; ++r){ for(long c = 0; c < U[mode].nCols; ++c) // or u[mode].nCols U[mode].vals[r * U[mode].nCols + c] = mode + .5;// 0.1 * drand48(); //1 ;//(r * R + c + 1); // } if(MTTKRPmode == 1){ checkCuda(hipMemcpy(dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); hipMemset(dU + szDU[0], 0, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)); } else if(MTTKRPmode == 2){ checkCuda(hipMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); hipMemset(dU + szDU[0] + szDU[1], 0, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)); } else if(MTTKRPmode == 3){ checkCuda(hipMemcpy(dU + szDU[0] + szDU[1] , &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); hipMemset(dU + szDU[0] + szDU[1] + szDU[2], 0, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)); } } if(performMTTKRPMode && TiledX[0].modeOrder[0] == MTTKRPmode){ // if(Opt.verbose) cout << "Slc atomics - " ; for (int tile = 0; tile < Opt.nTile; ++tile){ dBinLoc = 0; if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? 
TiledX[tile - 1].fbrPtr[2].size() : 0) ; } BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int smallBinEndsAt = 5; int slcPerTb = 0; int warpPerFbr =Opt.warpPerSlice;//4;//; BLOCKSIZE/32;//1;// int logOfWarpPerFbr = log2(warpPerFbr); int bin = 0; bool useLoop = false; int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB int logOfFbrPerWarp = log2(fbrPerWarp ); // int fbrPerWarp = 1;//BLOCKSIZE/32; // dont overflow TB // int logOfFbrPerWarp = log2(fbrPerWarp ); if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){ cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!" << endl << "hint: increase -b!" << endl; exit(0); } /* Like PARTI loop */ if(useLoop) grid.x = Opt.gridSize;// 32768*16; else grid.x = ( warpPerFbr * 32 * ((TiledX[tile].nFibers+fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE; double t0 = seconds(); cuda_timer_start(start); if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); else if(TiledX[0].ndims == 4) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); cuda_timer_stop(start, stop, mili); CPUtimer += seconds() - t0; GPUTime += mili; if(Opt.verbose){ cout << "Tile: " << tile << " - time: " << mili << "ms"; cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: " << TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " "; cout << endl; } } allModeGPUTime += GPUTime; cout << "singleCSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl; } /*processing fbrS level for 4D tensor*/ else if(TiledX[0].ndims == 4 && performMTTKRPnMode && TiledX[0].modeOrder[1] == MTTKRPmode){ // if(Opt.verbose) cout << "FbrS atomics - " ; mili = 0, GPUTime = 0, CPUtimer = 0; dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0; for (int tile = 0; tile < Opt.nTile; ++tile){ dBinLoc = 0; if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ; } // cout <<"might wanna change binning style and Block size, logWPC, COO like parallelism, allow mode sort" << endl; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int smallBinEndsAt = 5; int slcPerTb = 0; int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;// if(warpPerFbr > (BLOCKSIZE/32)){ cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" 
<< endl; exit(0); } int logOfWarpPerFbr = log2(warpPerFbr); int bin = 0; grid.x = ( warpPerFbr * 32 * TiledX[tile].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; double t0 = seconds(); cuda_timer_start(start); hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); cuda_timer_stop(start, stop, mili); CPUtimer += seconds() - t0; GPUTime += mili; if(Opt.verbose){ cout << "Tile: " << tile << " - time: " << mili << "ms"; cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: " << TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " "; cout << endl; } } allModeGPUTime += GPUTime; cout << "singleCSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl; } else if(performMTTKRPnMode && TiledX[0].modeOrder[TiledX[0].ndims-2] == MTTKRPmode){ // if(Opt.verbose) cout << "Fbr atomics - " ; mili = 0, GPUTime = 0, CPUtimer = 0; dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0; for (int tile = 0; tile < Opt.nTile; ++tile){ dBinLoc = 0; if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ; } // cout <<"might wanna change binning style and Block size, logWPC, COO like parallelism, allow mode sort" << endl; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int smallBinEndsAt = 5; int slcPerTb = 0; int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;// if(warpPerFbr > (BLOCKSIZE/32)){ cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" 
<< endl; exit(0); } int logOfWarpPerFbr = log2(warpPerFbr); int bin = 0; bool useLoop = false; // /* Like PARTI loop */ = if(useLoop) grid.x = Opt.gridSize;// 32768*16; else grid.x = ( warpPerFbr * 32 * TiledX[tile].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; double t0 = seconds(); cuda_timer_start(start); if(useLoop) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_loop), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else{ if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } cuda_timer_stop(start, stop, mili); CPUtimer += seconds() - t0; GPUTime += mili; if(Opt.verbose){ cout << "Tile: " << tile << " - time: " << mili << "ms"; cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: " << TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " "; cout << endl; } } allModeGPUTime += GPUTime; cout << "singleCSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl; } else if(performMTTKRPnnMode && TiledX[0].modeOrder[TiledX[0].ndims-1] == MTTKRPmode){ // if(Opt.verbose) cout << "Nnz atomics - " ; mili = 0, GPUTime = 0, CPUtimer = 0; dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0; for (int tile = 0; tile < Opt.nTile; ++tile){ dBinLoc = 0; if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ; } BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); bool useLoop = false; int smallBinEndsAt = 5; int slcPerTb = 0; int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;// if(warpPerFbr > (BLOCKSIZE/32)){ cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" 
<< endl; exit(0); } int logOfWarpPerFbr = log2(warpPerFbr); int bin = 0; // /* Like PARTI loop */ = if(useLoop) grid.x = Opt.gridSize;// 32768; else grid.x = ( warpPerFbr * 32 * TiledX[tile].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; int dloc = 0; double t0 = seconds(); cuda_timer_start(start); if(useLoop) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_loop), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers, dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else{ if (TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers, dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } cuda_timer_stop(start, stop, mili); CPUtimer += seconds() - t0; GPUTime += mili; if(Opt.verbose){ cout << "Tile: " << tile << " - time: " << mili << "ms"; cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: " << TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " "; cout << endl; } } allModeGPUTime += GPUTime; cout << "singleCSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl; } } cout << "Total GPU time: " << allModeGPUTime << ", nnz:" << TiledX[0].totNnz << ", nFibers:" << TiledX[0].fbrPtr[1].size() << ", nSlc:" << TiledX[0].fbrIdx[0].size() << endl; for (int bin = 0; bin < Opt.nBin; ++bin) hipStreamDestroy(streams[bin]); /* Copying output matrix from GPU to CPU for correctness check */ int MTTKRPmode = TiledX[0].ndims - 1; ITYPE loc = ((TiledX[0].ndims == 3) ? 
szDU[0] + szDU[1] : szDU[0] + szDU[1] + szDU[2]); checkCuda(hipMemcpy(&U[MTTKRPmode].vals[0], dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0); // check correctness // if(Opt.impType == 14){ // MTTKRPmode = 3; // checkCuda(hipMemcpy(&U[MTTKRPmode].vals[0] , dU + szDU[0] +szDU[1] + szDU[2], U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0); // } // else // checkCuda(hipMemcpy(&U[mode0].vals[0], dU, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0); hipFree(dVals); hipFree(dU); //hipFree(dU1); hipFree(dU2); hipFree(dU3); hipFree(dfbrIdx0); hipFree(dInds2); hipFree(dInds3); hipFree(dfbrIdx0); hipFree(dfbrIdx1); hipFree(dFbrIdx2); hipFree(dfbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrPtr2); hipFree(dFbrLikeSlcInds); return 0; } int MTTKRP_MIHCSR_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt){ ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dFbrLikeSlcInds; DTYPE *dVals; ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0; ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float memcpyTime = 0; // All m same mode ITYPE mode0 = 0;//TiledX[0].modeOrder[0]; ITYPE mode1 = 1;;//TiledX[0].modeOrder[1]; ITYPE mode2 = 2;//TiledX[0].modeOrder[2]; ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ; for (int m = 0; m < TiledX[0].ndims; ++m){ if (TiledX[m].totNnz == 0) continue; totNnz += TiledX[m].totNnz; totSlcPtr += TiledX[m].fbrPtr[0].size() ; totSlcIdx += TiledX[m].fbrIdx[0].size() ; totFbrPtr += TiledX[m].fbrPtr[1].size() ; totFbrIdx += TiledX[m].fbrIdx[1].size() ; totFbrPtr2 += ((TiledX[m].ndims == 4) ? TiledX[m].fbrPtr[2].size() : 0) ; } //allocate and memcpy GPU memory //Tensor cuda_timer_start(start); checkCuda(hipMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0); // checkCuda(hipMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 3) checkCuda(hipMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 4){ checkCuda(hipMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0); } /* cuda memcopy for tiled parts*/ for (int m = 0; m < TiledX[0].ndims; ++m){ if(m > 0) { if (TiledX[m-1].totNnz > 0) { dLoc += TiledX[m-1].totNnz; dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); // all m same dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size(); dFbrLoc += TiledX[m - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[m].ndims == 4) ? 
TiledX[m - 1].fbrPtr[2].size() : 0) ; } } if (TiledX[m].totNnz == 0) continue; checkCuda(hipMemcpy(dVals + dLoc, &(TiledX[m].vals[0]), TiledX[m].totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[m].fbrPtr[0][0]), TiledX[m].fbrPtr[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[m].fbrIdx[0][0]), TiledX[m].fbrIdx[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[m].fbrPtr[1][0]), TiledX[m].fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[m].fbrIdx[1][0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[m].fbrLikeSlcInds[0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); if(TiledX[m].ndims == 3){ if(m == 0) // checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[m].inds[mode2][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); else if(m == 1) checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); else if(m == 2) checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); } if(TiledX[m].ndims == 4){ checkCuda(hipMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[m].fbrPtr[2][0]), TiledX[m].fbrPtr[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[m].fbrIdx[2][0]), TiledX[m].fbrIdx[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dInds3 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[3]][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); } } cuda_timer_stop(start, stop, memcpyTime); cout << "CPU to GPU Memcopy time: " << memcpyTime << endl; // //Matrices unsigned int *dULoc = new unsigned int[TiledX[0].ndims]; unsigned int *szDU = new unsigned int[TiledX[0].ndims]; // //Matrices DTYPE *dU;// *dU0, *dU1, *dU2, *dU3; ITYPE mtxSize = ((TiledX[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols : (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols ); checkCuda(hipMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), 0); for (int m = 0; m < TiledX[0].ndims; ++m) szDU[m] = U[m].nRows * U[m].nCols; hipMemset(dU+0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(hipMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); if(TiledX[0].ndims == 4) checkCuda(hipMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); // BLOCK and GRID int BLOCKSIZE = 512; unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32); // if(Opt.warpPerSlice * 32 > BLOCKSIZE){ // cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." 
<< endl; // exit(0); // } hipStream_t streams[Opt.nBin]; float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0; int smallBinEndsAt = 5; /* Warp per slice and threadblock per slice */ int *warpPerSlc = new int[Opt.nBin]; int *logOfWarpPerSlc = new int[Opt.nBin]; int *TbPerSlc = new int[Opt.nBin]; int *logOfTbPerSlc = new int[Opt.nBin]; for (int bin = 0; bin < Opt.nBin ; ++bin){ TbPerSlc[bin] = 1; warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1); if(warpPerSlc[bin] > 16) warpPerSlc[bin] = 16; logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]); TbPerSlc[bin] = 1; logOfTbPerSlc[bin] = 0; if (bin >= smallBinEndsAt){ TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // 1st big bin starts with 1 TB 1 << 1 not 1 << 5 if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32; logOfTbPerSlc[bin] = log2(TbPerSlc[bin]); warpPerSlc[bin] = 16; logOfWarpPerSlc[bin] = 4; } } // TBD: change warpPerSlc to warpPerSlc[bin] and all int slcPerTb = 1; dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0; for (int bin = 0; bin < Opt.nBin; ++bin) hipStreamCreate(&streams[bin]); for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode){ if(MTTKRPmode > 0){ mili = 0; GPUTime = 0; CPUtimer = 0; dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0; // MTTKRP on mode mode 0 changed DU0. To pass correctness for now initializing to 2 again. int mode = MTTKRPmode - 1; for(long r = 0; r < U[mode].nRows; ++r){ for(long c = 0; c < U[mode].nCols; ++c) // or u[mode].nCols U[mode].vals[r * U[mode].nCols + c] = mode + .5;// 0.1 * drand48(); //1 ;//(r * R + c + 1); // } if(MTTKRPmode == 1){ checkCuda(hipMemcpy(dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); hipMemset(dU + szDU[0], 0, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)); } else if(MTTKRPmode == 2){ checkCuda(hipMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); hipMemset(dU + szDU[0] + szDU[1], 0, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)); } else if(MTTKRPmode == 3){ checkCuda(hipMemcpy(dU + szDU[0] + szDU[1] , &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); hipMemset(dU + szDU[0] + szDU[1] + szDU[2], 0, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)); } } for (int m = 0; m < TiledX[0].ndims; ++m){ /* matrix order according to mode order*/ for (int mm = 0; mm < TiledX[0].ndims; ++mm){ int curMode = TiledX[m].modeOrder[mm]; dULoc[mm] = 0; for (int q = 0; q < curMode; ++q) dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0 } dBinLoc = 0; if(m > 0) { if (TiledX[m-1].totNnz > 0) { dLoc += TiledX[m-1].totNnz; dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size(); dFbrLoc += TiledX[m - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? 
TiledX[m - 1].fbrPtr[2].size(): 0) ; } } BLOCKSIZE = 512; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); if (TiledX[m].totNnz == 0) continue; cuda_timer_start(start); if(TiledX[m].modeOrder[0] == MTTKRPmode && TiledX[m].totNnz){ if(Opt.verbose) cout << "Slc atomics - " ; // BLOCKSIZE = 128; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//4;//; int logOfWarpPerFbr = log2(warpPerFbr); int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB int logOfFbrPerWarp = log2(fbrPerWarp ); if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){ cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!" << endl << "hint: increase -b!" << endl; exit(0); } grid.x = ( warpPerFbr * 32 * ((TiledX[m].nFibers + fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); else if(TiledX[0].ndims == 4) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); } else if(TiledX[0].ndims == 4 && TiledX[m].modeOrder[1] == MTTKRPmode && TiledX[m].totNnz){ if(Opt.verbose) cout << "FbrS atomics - "; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//1;//BLOCKSIZE/32;//1;////4;//; if(warpPerFbr > (BLOCKSIZE/32)){ cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl; exit(0); } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } else if(TiledX[m].modeOrder[TiledX[0].ndims-2] == MTTKRPmode && TiledX[m].totNnz){ if(Opt.verbose) cout << "Fbr atomics - "; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;// if(warpPerFbr > (BLOCKSIZE/32)){ cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" 
<< endl; exit(0); } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } else if(TiledX[m].modeOrder[TiledX[0].ndims-1] == MTTKRPmode && TiledX[m].totNnz){ if(Opt.verbose) cout << "nnz atomics - " ; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;// if(warpPerFbr > (BLOCKSIZE/32)){ cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl; exit(0); } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; if (TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } cuda_timer_stop(start, stop, mili); GPUTime += mili; if(Opt.verbose) { cout << "Tile: " << m << " - time: " << mili << " ms"; cout <<" nnz: " << TiledX[m].totNnz << " nFibers: " << TiledX[m].fbrPtr[1].size() << " nSlc " << TiledX[m].fbrIdx[0].size() << " "; cout << " modeOrder: " << TiledX[m].modeOrder[0] <<" " << TiledX[m].modeOrder[1] <<" " << TiledX[m].modeOrder[2]; cout << endl; } } if(Opt.verbose) cout << "MI-HCSR-GPU-mode "<< MTTKRPmode <<" : " << GPUTime << "," << endl; allModeGPUTime += GPUTime; } int totalMIslics = 0, totalMISfibers = 0, totalMIfibers = 0, totalMInnz = 0;; for (int m = 0; m < TiledX[0].ndims; ++m){ if(TiledX[m].totNnz){ if(TiledX[m].ndims == 3){ totalMIslics += TiledX[m].fbrIdx[0].size(); totalMIfibers += TiledX[m].fbrPtr[1].size(); totalMInnz += TiledX[m].totNnz; } if(TiledX[m].ndims == 4){ totalMIslics += TiledX[m].fbrIdx[0].size(); totalMISfibers += TiledX[m].fbrPtr[1].size(); totalMIfibers += TiledX[m].fbrPtr[2].size(); totalMInnz += TiledX[m].totNnz; } } } cout << "Total GPU time: " << allModeGPUTime; // if(Opt.verbose) if(TiledX[0].ndims == 3) cout << " nSlc:" << totalMIslics << ", nFibers:" << totalMIfibers << ", nnz:" << totalMInnz << endl; else if(TiledX[0].ndims == 4) cout << " nSlc:" << totalMIslics << ", nSFibers:" << totalMISfibers << ", nFibers:" << totalMIfibers << ", nnz:" << totalMInnz << endl; 
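	/* All MTTKRP modes have been processed at this point. Below, only the factor matrix of
	   the last mode (TiledX[0].ndims - 1) is copied back for the correctness check; its
	   offset inside the packed dU buffer is szDU[0] + szDU[1] (plus szDU[2] for a 4D tensor). */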
for (int bin = 0; bin < Opt.nBin; ++bin) hipStreamDestroy(streams[bin]); /* Copying output matrix from GPU to CPU for correctness check */ int MTTKRPmode = TiledX[0].ndims - 1; ITYPE loc = ((TiledX[0].ndims == 3) ? szDU[0] + szDU[1] : szDU[0] + szDU[1] + szDU[2]); checkCuda(hipMemcpy(&U[MTTKRPmode].vals[0], dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0); hipFree(dVals); hipFree(dU); //hipFree(dU1); hipFree(dU2); hipFree(dU3); hipFree(dfbrIdx0); hipFree(dInds2); hipFree(dInds3); hipFree(dfbrIdx0); hipFree(dfbrIdx1); hipFree(dFbrIdx2); hipFree(dfbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrPtr2); hipFree(dFbrLikeSlcInds); return 0; } int init_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt, ITYPE **dInds2, ITYPE **dfbrPtr1, ITYPE **dfbrIdx1, ITYPE **dFbrLikeSlcInds, DTYPE **dVals, DTYPE **dU){ ITYPE mode0 = 0;//TiledX[0].modeOrder[0]; ITYPE mode1 = 1;;//TiledX[0].modeOrder[1]; ITYPE mode2 = 2;//TiledX[0].modeOrder[2]; ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ; // if(iter == 0 && cpdMode == 0) ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0; ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0; for (int m = 0; m < TiledX[0].ndims; ++m){ if (TiledX[m].totNnz == 0) continue; totNnz += TiledX[m].totNnz; totFbrPtr += TiledX[m].fbrPtr[1].size() ; totFbrIdx += TiledX[m].fbrIdx[1].size() ; totFbrPtr2 += ((TiledX[m].ndims == 4) ? TiledX[m].fbrPtr[2].size() : 0) ; } /*allocate and memcpy GPU memory*/ checkCuda(hipMalloc((void**) dVals, totNnz * sizeof(DTYPE)), 0); checkCuda(hipMalloc((void**) dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0); checkCuda(hipMalloc((void**) dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 3) checkCuda(hipMalloc((void**) dInds2, totNnz * sizeof(ITYPE)), 0); for (int m = 0; m < TiledX[0].ndims; ++m){ if(m > 0) { if (TiledX[m-1].totNnz > 0) { dLoc += TiledX[m-1].totNnz; dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); // all m same dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size(); dFbrLoc += TiledX[m - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size(); } } if (TiledX[m].totNnz == 0) continue; checkCuda(hipMemcpy(*dVals + dLoc, &(TiledX[m].vals[0]), TiledX[m].totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(*dfbrPtr1 + dFbrLoc, &(TiledX[m].fbrPtr[1][0]), TiledX[m].fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(*dfbrIdx1 + dFbrIdxLoc, &(TiledX[m].fbrIdx[1][0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(*dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[m].fbrLikeSlcInds[0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(*dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0); } // //Matrices unsigned int *szDU = new unsigned int[TiledX[0].ndims]; ITYPE mtxSize = ((TiledX[0].ndims == 3) ? 
(U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols : (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols ); for (int m = 0; m < TiledX[0].ndims; ++m) szDU[m] = U[m].nRows * U[m].nCols; checkCuda(hipMalloc((void**) dU, mtxSize * sizeof(DTYPE)), 0); // hipMemset(dU+0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(hipMemcpy(*dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(*dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); checkCuda(hipMemcpy(*dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0); // MTTKRP_MIHCSR_GPU_oneMode_forCPD(TiledX, U, Opt, 0, 0, // dInds2, dfbrPtr1, dfbrIdx1, dFbrLikeSlcInds, dVals, dU); } int MTTKRP_MIHCSR_GPU_oneMode_forCPD(TiledTensor *TiledX, Matrix *U, const Options &Opt, int cpdMode, int iter, ITYPE *dInds2, ITYPE *dfbrPtr1, ITYPE *dfbrIdx1, ITYPE *dFbrLikeSlcInds, DTYPE *dVals, DTYPE *dU){ hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float mili; ITYPE *dInds3, *dfbrPtr0, *dfbrIdx0, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin; // DTYPE *dVals; ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0; unsigned int *dULoc = new unsigned int[TiledX[0].ndims]; unsigned int *szDU = new unsigned int[TiledX[0].ndims]; for (int m = 0; m < TiledX[0].ndims; ++m) szDU[m] = U[m].nRows * U[m].nCols; ITYPE loc = 0; for (int m = 0; m < cpdMode; ++m) loc += szDU[m]; hipMemset(dU+loc, 0, U[cpdMode].nRows * U[cpdMode].nCols * sizeof(DTYPE)); // BLOCK and GRID int BLOCKSIZE = 512; float GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0; dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0; int MTTKRPmode = cpdMode; // for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode) { for (int m = 0; m < TiledX[0].ndims; ++m){ /* matrix order according to mode order*/ for (int mm = 0; mm < TiledX[0].ndims; ++mm){ int curMode = TiledX[m].modeOrder[mm]; dULoc[mm] = 0; for (int q = 0; q < curMode; ++q) dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0 } dBinLoc = 0; if(m > 0) { if (TiledX[m-1].totNnz > 0) { dLoc += TiledX[m-1].totNnz; dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size(); dFbrLoc += TiledX[m - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[m - 1].fbrPtr[2].size(): 0) ; } } BLOCKSIZE = 512; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); if (TiledX[m].totNnz == 0) continue; cuda_timer_start(start); if(TiledX[m].modeOrder[0] == MTTKRPmode && TiledX[m].totNnz){ // if(Opt.verbose) // cout << "Slc atomics - " ; // BLOCKSIZE = 128; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//4;//; int logOfWarpPerFbr = log2(warpPerFbr); int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB int logOfFbrPerWarp = log2(fbrPerWarp ); if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){ cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!" << endl << "hint: increase -b!" 
<< endl; exit(0); } grid.x = ( warpPerFbr * 32 * ((TiledX[m].nFibers + fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); else if(TiledX[0].ndims == 4) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); } else if(TiledX[m].modeOrder[TiledX[0].ndims-2] == MTTKRPmode && TiledX[m].totNnz){ // if(Opt.verbose) // cout << "Fbr atomics - "; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;// if(warpPerFbr > (BLOCKSIZE/32)){ cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl; exit(0); } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } else if(TiledX[m].modeOrder[TiledX[0].ndims-1] == MTTKRPmode && TiledX[m].totNnz){ // if(Opt.verbose) // cout << "nnz atomics - " ; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;// if(warpPerFbr > (BLOCKSIZE/32)){ cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" 
<< endl; exit(0); } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; if (TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } cuda_timer_stop(start, stop, mili); GPUTime += mili; if(Opt.verbose) { cout << "Tile: " << m << " - time: " << mili << " ms"; cout <<" nnz: " << TiledX[m].totNnz << " nFibers: " << TiledX[m].fbrPtr[1].size() << " nSlc " << TiledX[m].fbrIdx[0].size() << " "; cout << " modeOrder: " << TiledX[m].modeOrder[0] <<" " << TiledX[m].modeOrder[1] <<" " << TiledX[m].modeOrder[2]; cout << endl; } } // cout << "MI-HCSR-GPU-mode "<< MTTKRPmode <<" : " << GPUTime << "," << endl; allModeGPUTime += GPUTime; } // ITYPE loc = 0; // for (int m = 0; m < cpdMode; ++m) // loc += szDU[m]; // ITYPE loc = szDU[0]; /* Copying output matrix from GPU to CPU for correctness check */ checkCuda(hipMemcpy(&U[cpdMode].vals[0], dU + loc, U[cpdMode].nRows * U[cpdMode].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0); if(iter == Opt.cpdIters - 1 && cpdMode == TiledX[0].ndims - 1) { cout << "Freeing variable " << endl; hipFree(dVals); hipFree(dU); //hipFree(dU1); hipFree(dU2); hipFree(dU3); hipFree(dfbrIdx0); hipFree(dInds2); hipFree(dInds3); hipFree(dfbrIdx0); hipFree(dfbrIdx1); hipFree(dFbrIdx2); hipFree(dfbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrPtr2); hipFree(dFbrLikeSlcInds); } return 0; }
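/* ---------------------------------------------------------------------------------------
 * Illustrative sketch (not part of the original source): it shows how init_GPU() and
 * MTTKRP_MIHCSR_GPU_oneMode_forCPD() above are intended to be paired inside a CP-ALS style
 * loop -- allocate and copy the tiled tensor and factor matrices once, then run one MTTKRP
 * per mode per iteration while reusing the same device buffers. The helper name
 * run_cpd_mttkrp_sketch is hypothetical; it assumes Opt.cpdIters holds the number of CP
 * iterations (as referenced in the function above) and it omits the remaining factor-update
 * steps that a full CPD driver would perform.
 * ------------------------------------------------------------------------------------- */
int run_cpd_mttkrp_sketch(TiledTensor *TiledX, Matrix *U, const Options &Opt){

    ITYPE *dInds2 = NULL, *dfbrPtr1 = NULL, *dfbrIdx1 = NULL, *dFbrLikeSlcInds = NULL;
    DTYPE *dVals = NULL, *dU = NULL;

    /* One-time device allocation and host-to-device copies for every tile and factor. */
    init_GPU(TiledX, U, Opt, &dInds2, &dfbrPtr1, &dfbrIdx1, &dFbrLikeSlcInds, &dVals, &dU);

    /* One MTTKRP per mode per iteration; the call for the last (iter, mode) pair frees the
       device buffers inside MTTKRP_MIHCSR_GPU_oneMode_forCPD itself. */
    for (int iter = 0; iter < Opt.cpdIters; ++iter)
        for (int cpdMode = 0; cpdMode < TiledX[0].ndims; ++cpdMode)
            MTTKRP_MIHCSR_GPU_oneMode_forCPD(TiledX, U, Opt, cpdMode, iter,
                dInds2, dfbrPtr1, dfbrIdx1, dFbrLikeSlcInds, dVals, dU);

    return 0;
}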
f3f4775a10ff82bf47d9a215632281849408b1db.cu
/** * * OHIO STATE UNIVERSITY SOFTWARE DISTRIBUTION LICENSE * * Load-balanced sparse MTTKRP on GPUs (the “Software”) Copyright (c) 2019, The Ohio State * University. All rights reserved. * * The Software is available for download and use subject to the terms and * conditions of this License. Access or use of the Software constitutes acceptance * and agreement to the terms and conditions of this License. Redistribution and * use of the Software in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the capitalized paragraph below. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the capitalized paragraph below in the documentation * and/or other materials provided with the distribution. * * 3. The names of Ohio State University, or its faculty, staff or students may not * be used to endorse or promote products derived from the Software without * specific prior written permission. * * THIS SOFTWARE HAS BEEN APPROVED FOR PUBLIC RELEASE, UNLIMITED DISTRIBUTION. THE * SOFTWARE IS PROVIDED “AS IS” AND WITHOUT ANY EXPRESS, IMPLIED OR STATUTORY * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF ACCURACY, COMPLETENESS, * NONINFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. ACCESS OR USE OF THE SOFTWARE IS ENTIRELY AT THE USER’S RISK. IN * NO EVENT SHALL OHIO STATE UNIVERSITY OR ITS FACULTY, STAFF OR STUDENTS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE SOFTWARE * USER SHALL INDEMNIFY, DEFEND AND HOLD HARMLESS OHIO STATE UNIVERSITY AND ITS * FACULTY, STAFF AND STUDENTS FROM ANY AND ALL CLAIMS, ACTIONS, DAMAGES, LOSSES, * LIABILITIES, COSTS AND EXPENSES, INCLUDING ATTORNEYS’ FEES AND COURT COSTS, * DIRECTLY OR INDIRECTLY ARISING OUT OF OR IN CONNECTION WITH ACCESS OR USE OF THE * SOFTWARE. 
* */ /** * * Author: * Israt Nisa ([email protected]) * * Contacts: * Israt Nisa ([email protected]) * Jiajia Li ([email protected]) * */ #include <iostream> #include "mttkrp_gpu.h" #include <vector> inline cudaError_t checkCuda(cudaError_t result, int s){ if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error in line : %s - %d\n", cudaGetErrorString(result), s); assert(result == cudaSuccess); } return result; } void cuda_timer_start(cudaEvent_t start){ checkCuda(cudaEventRecord(start), __LINE__); } void cuda_timer_stop(cudaEvent_t start, cudaEvent_t stop, float &mili){ checkCuda(cudaEventRecord(stop), __LINE__); cudaEventSynchronize(stop); checkCuda(cudaEventElapsedTime(&mili, start, stop), __LINE__); cudaDeviceSynchronize(); } // CUDA kernel call to do COO MTTKRP __global__ void mttkrp_COO_kernel(DTYPE *vals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2, ITYPE nnz, DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, ITYPE mode, ITYPE R){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int x = gId >> 5; if(x < nnz){ DTYPE tmp_val = 0; ITYPE idx0 = dInds0[x]; ITYPE idx1 = dInds1[x]; ITYPE idx2 = dInds2[x]; for(ITYPE r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } // CUDA kernel call to do COO MTTKRP using loop __global__ void mttkrp_COO_kernel_loop(DTYPE * const vals, ITYPE * const dInds0, ITYPE * const dInds1, ITYPE * const dInds2, const ITYPE nnz, DTYPE *dU0, DTYPE * const dU1, DTYPE * const dU2, ITYPE mode, ITYPE R){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); //like PARTI size_t num_loops_nnz = 1 * 32; size_t const nnz_per_loop = gridDim.x * blockDim.x; if(nnz > nnz_per_loop) { num_loops_nnz = ((nnz + nnz_per_loop - 1) / nnz_per_loop) << 5; } unsigned int x; for(size_t nl=0; nl<num_loops_nnz; ++nl) { x = (gId + nl * nnz_per_loop) >> 5; if(x < nnz){ DTYPE tmp_val = 0; ITYPE idx0 = dInds0[x]; ITYPE idx1 = dInds1[x]; ITYPE idx2 = dInds2[x]; for(ITYPE r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp_val); } } __syncthreads(); } } // CUDA kernel call to do COO MTTKRP 4D __global__ void mttkrp_COO_kernel_4D(DTYPE *vals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2, ITYPE *dInds3, ITYPE nnz, DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int x = gId >> 5; if(x < nnz){ DTYPE tmp_val = 0; ITYPE idx0 = dInds0[x]; ITYPE idx1 = dInds1[x]; ITYPE idx2 = dInds2[x]; ITYPE idx3 = dInds3[x]; for(ITYPE r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] * dU3[idx3 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } // CUDA kernel call to do COO MTTKRP 4D using loop __global__ void mttkrp_COO_kernel_4D_loop(DTYPE *const vals, ITYPE * const dInds0, ITYPE * const dInds1, ITYPE *const dInds2, ITYPE * const dInds3, ITYPE nnz, DTYPE *dU0, DTYPE * const dU1, DTYPE * const dU2, DTYPE * const dU3, ITYPE mode, ITYPE R){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); //like PARTI size_t num_loops_nnz = 1 * 32; size_t const nnz_per_loop = gridDim.x * blockDim.x; if(nnz > nnz_per_loop) { num_loops_nnz = ((nnz + nnz_per_loop - 1) / nnz_per_loop) << 5; } unsigned int x; for(size_t nl=0; 
nl<num_loops_nnz; ++nl) { x = (gId + nl * nnz_per_loop) >> 5; if(x < nnz){ DTYPE tmp_val = 0; ITYPE idx0 = dInds0[x]; ITYPE idx1 = dInds1[x]; ITYPE idx2 = dInds2[x]; ITYPE idx3 = dInds3[x]; for(ITYPE r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] * dU3[idx3 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp_val); } } __syncthreads(); } } //no atomics because all 1 in HYB - COO __global__ void mttkrp_HYB_COO_kernel(DTYPE *vals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2, ITYPE nnz, DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, ITYPE mode, ITYPE R){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int x = gId >> 5; if(x < nnz){ DTYPE tmp_val = 0; ITYPE idx0 = dInds0[x]; ITYPE idx1 = dInds1[x]; ITYPE idx2 = dInds2[x]; for(ITYPE r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r]; dU0[idx0 * R + r] += tmp_val; } } } // CUDA kernel call to do COO MTTKRP using loop __global__ void mttkrp_HYB_COO_kernel_loop(DTYPE * const vals, ITYPE * const dInds0, ITYPE * const dInds1, ITYPE * const dInds2, const ITYPE nnz, DTYPE *dU0, DTYPE * const dU1, DTYPE * const dU2, ITYPE mode, ITYPE R){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); //like PARTI size_t num_loops_nnz = 1 * 32; size_t const nnz_per_loop = gridDim.x * blockDim.x; if(nnz > nnz_per_loop) { num_loops_nnz = ((nnz + nnz_per_loop - 1) / nnz_per_loop) << 5; } unsigned int x; for(size_t nl=0; nl<num_loops_nnz; ++nl) { x = (gId + nl * nnz_per_loop) >> 5; if(x < nnz){ DTYPE tmp_val = 0; ITYPE idx0 = dInds0[x]; ITYPE idx1 = dInds1[x]; ITYPE idx2 = dInds2[x]; for(ITYPE r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r]; dU0[idx0 * R + r] += tmp_val; } } __syncthreads(); } } //no atomics because all 1 in HYB - COO __global__ void mttkrp_HYB_COO_kernel_4D(DTYPE *vals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2, ITYPE *dInds3, ITYPE nnz, DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int x = gId >> 5; if(x < nnz){ DTYPE tmp_val = 0; ITYPE idx0 = dInds0[x]; ITYPE idx1 = dInds1[x]; ITYPE idx2 = dInds2[x]; ITYPE idx3 = dInds3[x]; for(ITYPE r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] * dU3[idx3 * R + r]; dU0[idx0 * R + r] += tmp_val; } } } // CUDA kernel call to do COO MTTKRP 4D using loop __global__ void mttkrp_HYB_COO_kernel_4D_loop(DTYPE *const vals, ITYPE * const dInds0, ITYPE * const dInds1, ITYPE *const dInds2, ITYPE * const dInds3, ITYPE nnz, DTYPE *dU0, DTYPE * const dU1, DTYPE * const dU2, DTYPE * const dU3, ITYPE mode, ITYPE R){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); //like PARTI size_t num_loops_nnz = 1 * 32; size_t const nnz_per_loop = gridDim.x * blockDim.x; if(nnz > nnz_per_loop) { num_loops_nnz = ((nnz + nnz_per_loop - 1) / nnz_per_loop) << 5; } unsigned int x; for(size_t nl=0; nl<num_loops_nnz; ++nl) { x = (gId + nl * nnz_per_loop) >> 5; if(x < nnz){ DTYPE tmp_val = 0; ITYPE idx0 = dInds0[x]; ITYPE idx1 = dInds1[x]; ITYPE idx2 = dInds2[x]; ITYPE idx3 = dInds3[x]; for(ITYPE r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] * dU3[idx3 * R + r]; dU0[idx0 * R + r] += tmp_val; } } __syncthreads(); } } __global__ void 
mttkrp_CSL_kernel(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *dInds1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE tmp_val; if(slc < nSlices){ unsigned int mappedSlc = slc;//dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc]; int fb_st = fbrPtr0[mappedSlc]; int fb_end = fbrPtr0[mappedSlc+1]; tmp_val = 0; for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){ unsigned int idx1 = dInds1[fbr]; unsigned int idx2 = dInds2[fbr]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[fbr] * dU2[idx2 * R + r] * dU1[idx1 * R + r]; } } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } __global__ void mttkrp_CSL_kernel_bin(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *dInds1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc]; int fb_st = fbrPtr0[mappedSlc]; int fb_end = fbrPtr0[mappedSlc+1]; tmp_val = 0; for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){ unsigned int idx1 = dInds1[fbr]; unsigned int idx2 = dInds2[fbr]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[fbr] * dU2[idx2 * R + r] * dU1[idx1 * R + r]; } } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } // CSL kernel with loop like ParTI __global__ void mttkrp_CSL_kernel_bin_loop(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *dInds1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE tmp_val; //like PARTI size_t num_loops_nnz = 1 * 32; size_t const nnz_per_loop = gridDim.x * blockDim.x; if(nSlices > nnz_per_loop) { num_loops_nnz = ((nSlices + nnz_per_loop - 1) / nnz_per_loop) << 5; } for(size_t nl=0; nl<num_loops_nnz; ++nl) { slc = (gId + nl * nnz_per_loop) >> 5; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc]; int fb_st = fbrPtr0[mappedSlc]; int fb_end = fbrPtr0[mappedSlc+1]; tmp_val = 0; for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){ unsigned int idx1 = dInds1[fbr]; unsigned int idx2 = dInds2[fbr]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[fbr] * dU2[idx2 * R + r] * dU1[idx1 * R + r]; } } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp_val); } } __syncthreads(); } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_CSL_kernel_hvyBin(DTYPE * vals, ITYPE 
*dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *dInds1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){ unsigned int laneId = threadIdx.x & 31; unsigned int workId = threadIdx.x >> 5; unsigned int slc = blockIdx.x >> logOfTPS; unsigned int localBId = blockIdx.x & (TbPerSlc -1); DTYPE tmp = 0, tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc; unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc]; unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS; unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ; unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ; tmp_val = 0; for (int fbr = fb_st + workId; fbr < fb_end && fbr < fbrPtr0[mappedSlc+1]; fbr+=warpPerSlice){ unsigned int idx1 = dInds1[fbr]; unsigned int idx2 = dInds2[fbr]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[fbr] * dU2[idx2 * R + r] * dU1[idx1 * R + r]; } } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } // HCSR MTTKRP : 16 WARP = 1 TB per slice __global__ void mttkrp_HCSR_kernel_16WARP(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = tId >> 5; //(tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = blockIdx.x ;//gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE tmp = 0; DTYPE tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc; int fb_st = fbrPtr0[mappedSlc]; int fb_end = fbrPtr0[mappedSlc+1]; for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){ tmp_val = 0; for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) { unsigned int idx2 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU2[idx2 * R + r]; } } // unsigned int idx1 = dInds1[fbrPtr1[fbr]]; unsigned int idx1 = fbrIdx1[fbr]; for(unsigned int r=laneId; r<R; r+=32) { tmp += tmp_val * dU1[idx1 * R + r] ; // C matrix } } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp); } } } // CUDA kernel call to do HCSR MTTKRP for the first bin 1 WARP per slice __global__ void mttkrp_HCSR_kernel_COO(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int slc = gId >> 5; // 5: minimum 1 WARP (2^5) DTYPE tmp = 0, tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc; int fb_st = fbrPtr0[mappedSlc]; int fb_end = fbrPtr0[mappedSlc+1]; for (int fbr = fb_st; fbr < fb_end; fbr++){ tmp_val = 0; for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) { unsigned int idx2 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU2[idx2 * R + r]; } } unsigned int idx1 = fbrIdx1[fbr]; for(unsigned int r=laneId; r<R; r+=32) { dU0[idx0 * R + r] += tmp_val 
* dU1[idx1 * R + r] ; } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_HCSR_kernel_smllBin(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // unsigned int slcPerTb = 16/warpPerSlice; // unsigned int shSlc = slc & slcPerTb; DTYPE tmp = 0, tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc; int fb_st = fbrPtr0[mappedSlc]; int fb_end = fbrPtr0[mappedSlc+1]; for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){ tmp_val = 0; for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) { unsigned int idx2 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU2[idx2 * R + r]; } } unsigned int idx1 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) { tmp += tmp_val * dU1[idx1 * R + r] ; } } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp); } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_HCSR_kernel_smllBin_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE outbuffer = 0, tmp_val = 0, outbuffer1 = 0; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc; for (int fbrS = fbrPtr0[mappedSlc]; fbrS < fbrPtr0[mappedSlc+1]; fbrS++){ unsigned int idx1 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; outbuffer1 = 0; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx2 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx3 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU3[idx3 * R + r]; } for(unsigned int r=laneId; r<R; r+=32) outbuffer1 += tmp_val * dU2[idx2 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) outbuffer += outbuffer1 * dU1[idx1 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], outbuffer); } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_HCSR_kernel_hvyBin(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){ unsigned int laneId = threadIdx.x & 31; unsigned int workId = threadIdx.x >> 5; unsigned int slc = blockIdx.x >> logOfTPS; unsigned int localBId = blockIdx.x & (TbPerSlc -1); DTYPE tmp = 0, tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc; unsigned int 
nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc]; unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS; unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ; unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ; for (int fbr = fb_st + workId; fbr < fb_end && fbr < fbrPtr0[mappedSlc+1] ; fbr+=warpPerSlice){ tmp_val = 0; for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) { unsigned int idx2 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU2[idx2 * R + r]; } } unsigned int idx1 = fbrIdx1[fbr];//dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) { tmp += tmp_val * dU1[idx1 * R + r] ; // // atomicAdd(&dU0[idx0 * R + r], tmp); } } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp); } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_HCSR_kernel_hvyBin_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){ unsigned int laneId = threadIdx.x & 31; unsigned int workId = threadIdx.x >> 5; unsigned int slc = blockIdx.x >> logOfTPS; unsigned int localBId = blockIdx.x & (TbPerSlc -1); DTYPE outbuffer = 0, tmp_val = 0, outbuffer1 = 0;; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc; unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc]; unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS; unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ; unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ; for (int fbrS = fb_st; fbrS < fb_end && fbrS < fbrPtr0[mappedSlc+1] ; fbrS++){ unsigned int idx1 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; outbuffer1 = 0; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx2 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx3 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU3[idx3 * R + r]; } for(unsigned int r=laneId; r<R; r+=32) outbuffer1 += tmp_val * dU2[idx2 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) outbuffer += outbuffer1 * dU1[idx1 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], outbuffer); } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int fbrPerWarp, int logOfFPW){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbr = (gId >> (5 + logOfWPC)) << logOfFPW; // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val; if(fbr < nFibers - 1){ tmp_val = 0; bool diffFiber = false; unsigned int idx0; for (int fr = 0; fr < fbrPerWarp && (fbr+fr) < (nFibers - 1); ++fr){ diffFiber = false; unsigned int idx1 = fbrIdx1[fbr+fr];// dInds1[fbrPtr1[fbr]]; idx0 = fbrLikeSlcInds[fbr+fr];//slc; tmp_val = 0; for(unsigned int x = fbrPtr1[fbr+fr] + workId; x < fbrPtr1[fbr+fr+1]; x+=warpPerSlice) { unsigned int idx2 = dInds2[x]; for(unsigned int 
r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU2[idx2 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp += tmp_val * dU1[idx1 * R + r] ; //2PR } if(fbrLikeSlcInds[fbr+fr] != fbrLikeSlcInds[fbr+fr+1]) { diffFiber = true; for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp); //2PR } tmp = 0; } } if(!diffFiber) { for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp); } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int fbrPerWarp, int logOfFPW){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = (gId >> (5 + logOfWPC)) << logOfFPW; // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val, tmp2= 0; if(fbrS < nFibers - 1){ tmp_val = 0; bool diffFiber = false; unsigned int idx0; for (int fr = 0; fr < fbrPerWarp && (fbrS+fr) < (nFibers - 1); ++fr){ diffFiber = false; unsigned int idx1 = fbrIdx1[fbrS+fr];// dInds1[fbrPtr1[fbr]]; idx0 = fbrLikeSlcInds[fbrS+fr];//slc; tmp = 0; for (int fbr = fbrPtr1[fbrS+fr] + workId; fbr < fbrPtr1[fbrS+fr+1]; fbr+=warpPerSlice){ ITYPE idx2 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; x++) { unsigned int idx3 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU3[idx3 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp += tmp_val * dU2[idx2 * R + r] ; } } for(unsigned int r=laneId; r<R; r+=32) { tmp2 += tmp * dU1[idx1 * R + r] ; } if(fbrLikeSlcInds[fbrS+fr] != fbrLikeSlcInds[fbrS+fr+1]) { diffFiber = true; for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } tmp2 = 0; } } if(!diffFiber) { for(unsigned int r=laneId; r<R; r+=32) atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_smllBin_fbr_atomic(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; ITYPE slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE tmp = 0, tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx2 = dfbrIdx0[mappedSlc] ;//slc; int fb_st = fbrPtr0[mappedSlc]; int fb_end = fbrPtr0[mappedSlc+1]; for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){ tmp_val = 0; unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) { unsigned int idx1 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp = tmp_val * dU2[idx2 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp); //2PR } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_smllBin_fbr_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE 
*dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE outbuffer = 0, tmp_val = 0, tmp = 0; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx2 = dfbrIdx0[mappedSlc] ;//slc; for (int fbrS = fbrPtr0[mappedSlc]; fbrS < fbrPtr0[mappedSlc+1]; fbrS++){ unsigned int idx3 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; tmp = 0; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx0 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx1 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU1[idx1 * R + r]; } for(unsigned int r=laneId; r<R; r+=32) { tmp = tmp_val * dU2[idx2 * R + r] * dU3[idx3 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp); } } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_hvyBin_fbr_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){ unsigned int laneId = threadIdx.x & 31; unsigned int workId = threadIdx.x >> 5; unsigned int slc = blockIdx.x >> logOfTPS; unsigned int localBId = blockIdx.x & (TbPerSlc -1); DTYPE outbuffer = 0, tmp_val = 0, tmp = 0;; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx2 = dfbrIdx0[mappedSlc] ;//slc; unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc]; unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS; unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ; unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ; for (int fbrS = fb_st; fbrS < fb_end && fbrS < fbrPtr0[mappedSlc+1] ; fbrS++){ unsigned int idx3 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; tmp = 0; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx0 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx1 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU1[idx1 * R + r]; } for(unsigned int r=laneId; r<R; r+=32) { tmp = tmp_val * dU2[idx2 * R + r] * dU3[idx3 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp); } } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_smllBin_fbrS_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE tmp = 0, tmp_val, tmp2 = 0; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; 
unsigned int idx3 = dfbrIdx0[mappedSlc] ;//slc; for (int fbrS = fbrPtr0[mappedSlc]; fbrS < fbrPtr0[mappedSlc+1]; fbrS++){ unsigned int idx0 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; tmp = 0; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx1 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx2 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU2[idx2 * R + r]; } for(unsigned int r=laneId; r<R; r+=32) tmp += tmp_val * dU1[idx1 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) { tmp2 = tmp * dU3[idx3 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_hvyBin_fbrS_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){ unsigned int laneId = threadIdx.x & 31; unsigned int workId = threadIdx.x >> 5; unsigned int slc = blockIdx.x >> logOfTPS; unsigned int localBId = blockIdx.x & (TbPerSlc -1); DTYPE tmp = 0, tmp_val, tmp2 = 0; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx3 = dfbrIdx0[mappedSlc] ;//slc; unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc]; unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS; unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ; unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ; for (int fbrS = fb_st; fbrS < fb_end && fbrS < fbrPtr0[mappedSlc+1] ; fbrS++){ unsigned int idx0 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; tmp = 0; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx1 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx2 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU2[idx2 * R + r]; } for(unsigned int r=laneId; r<R; r+=32) tmp += tmp_val * dU1[idx1 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) { tmp2 = tmp * dU3[idx3 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbr = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val; if(fbr < nFibers - 1){ tmp_val = 0; unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; unsigned int idx2 = fbrLikeSlcInds[fbr];//slc; for(unsigned int x = fbrPtr1[fbr] + workId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) { unsigned int idx1 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp = tmp_val * dU2[idx2 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp); //2PR } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE 
*fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val, tmp2 = 0; if(fbrS < nFibers - 1){ tmp = 0; unsigned int idx2 = fbrLikeSlcInds[fbrS];//slc; unsigned int idx3 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ unsigned int idx0 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx1 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR // if(laneId == 0) // printf("from GPU: (%d %d %d %d) - %f %f %f %f \n", idx0, idx1, idx2, idx3, dU0[idx0 * R] , dU1[idx1 * R], dU2[idx2 * R], dU3[idx3 * R]); } for(unsigned int r=laneId; r<R; r+=32) { tmp = tmp_val * dU2[idx2 * R + r] * dU3[idx3 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp); } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val, tmp2 = 0; if(fbrS < nFibers - 1){ tmp = 0; unsigned int idx0 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; unsigned int idx3 = fbrLikeSlcInds[fbrS];//slc; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ unsigned int idx1 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx2 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU2[idx2 * R + r] ; //2MR } for(unsigned int r=laneId; r<R; r+=32) tmp += tmp_val * dU1[idx1 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) { tmp2 = tmp * dU3[idx3 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_loop(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); //like PARTI //hardcoded for 1 warp per nnz size_t num_loops_fbr = 1 * 32; size_t const fbr_per_loop = gridDim.x * blockDim.x; if(nFibers > fbr_per_loop) { num_loops_fbr = ((nFibers + fbr_per_loop - 1) / fbr_per_loop) << 5; } DTYPE tmp = 0, tmp_val; unsigned int fbr; for(size_t nl=0; nl<num_loops_fbr; ++nl) { fbr = (gId + nl * fbr_per_loop) >> 5; if(fbr < nFibers - 1){ tmp_val = 0; unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; unsigned int idx2 = fbrLikeSlcInds[fbr];//slc; for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; x++) { unsigned int idx1 = dInds2[x]; 
for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp = tmp_val * dU2[idx2 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp); //2PR } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_hvyBin_fbr_atomic(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){ ITYPE laneId = threadIdx.x & 31; ITYPE workId = threadIdx.x >> 5; ITYPE slc = blockIdx.x >> logOfTPS; ITYPE localBId = blockIdx.x & (TbPerSlc -1); DTYPE tmp = 0, tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx2 = dfbrIdx0[mappedSlc] ;//slc; unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc]; unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS; unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ; unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ; for (int fbr = fb_st + workId; fbr < fb_end && fbr < fbrPtr0[mappedSlc+1]; fbr+=warpPerSlice){ tmp_val = 0; unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) { unsigned int idx1 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU1[idx1 * R + r]; } } for(unsigned int r=laneId; r<R; r+=32) { tmp = tmp_val * dU2[idx2 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp); } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_smllBin_all_atomic(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; ITYPE slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // ITYPE slcPerTb = 16/warpPerSlice; // ITYPE shSlc = slc & slcPerTb; DTYPE tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx1 = dfbrIdx0[mappedSlc] ;//slc; int fb_st = fbrPtr0[mappedSlc]; int fb_end = fbrPtr0[mappedSlc+1]; for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){ unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; // for(unsigned int r=laneId; r<R; r+=32) // tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) { unsigned int idx0 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp_val); //2MR // atomicAdd(&dU0[idx0 * R + r], (tmp_val * vals[x]) ); } } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_hvyBin_all_atomic(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){ ITYPE laneId = threadIdx.x & 31; ITYPE workId = threadIdx.x >> 5; ITYPE slc = blockIdx.x >> logOfTPS; ITYPE localBId = blockIdx.x & (TbPerSlc -1); DTYPE tmp = 0, tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx1 = 
dfbrIdx0[mappedSlc] ;//slc; unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc]; unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS; unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ; unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ; for (int fbr = fb_st + workId; fbr < fb_end && fbr < fbrPtr0[mappedSlc+1]; fbr+=warpPerSlice){ tmp_val = 0; unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) { unsigned int idx0 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { // atomicAdd(&dU0[idx0 * R + r], (tmp_val * vals[x]) ); tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_smllBin_all_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){ unsigned int tId = threadIdx.x; unsigned int laneId = tId & 31; unsigned int gId = (blockIdx.x * blockDim.x + tId); unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) DTYPE outbuffer = 0, tmp_val = 0, tmp = 0; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx1 = dfbrIdx0[mappedSlc] ;//slc; for (int fbrS = fbrPtr0[mappedSlc]; fbrS < fbrPtr0[mappedSlc+1]; fbrS++){ unsigned int idx2 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx3 = fbrIdx2[fbr]; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx0 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp = vals[x] * dU3[idx3 * R + r] * tmp_val;//2MR atomicAdd(&dU0[idx0 * R + r], tmp); } } } } } } // CUDA kernel call to do HCSR MTTKRP __global__ void mttkrp_MIHCSR_kernel_hvyBin_all_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){ unsigned int laneId = threadIdx.x & 31; unsigned int workId = threadIdx.x >> 5; unsigned int slc = blockIdx.x >> logOfTPS; unsigned int localBId = blockIdx.x & (TbPerSlc -1); DTYPE tmp = 0, tmp_val; if(slc < nSlices){ unsigned int mappedSlc = dSlcMapperBin[slc]; unsigned int idx1 = dfbrIdx0[mappedSlc] ;//slc; unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc]; unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS; unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ; unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ; for (int fbrS = fb_st; fbrS < fb_end && fbrS < fbrPtr0[mappedSlc+1] ; fbrS++){ unsigned int idx2 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx3 = fbrIdx2[fbr]; for(unsigned int x = 
fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx0 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp = vals[x] * dU3[idx3 * R + r] * tmp_val;//2MR atomicAdd(&dU0[idx0 * R + r], tmp); } } } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbr = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val; if(fbr < nFibers - 1){ tmp_val = 0; unsigned int idx1 = fbrLikeSlcInds[fbr];//slc; unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; // if(laneId == 0 && idx1 == 0) // printf("GPU %d %d %f %f\n", idx1, idx2, dU1[idx1 * R], dU2[idx2 * R] ); for(unsigned int r=laneId; r<R; r+=32) tmp = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for(unsigned int x = fbrPtr1[fbr] + workId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) { unsigned int idx0 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val = vals[x] * tmp;///dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //2MR atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val = 0;; if(fbrS < nFibers - 1){ tmp = 0; unsigned int idx1 = fbrLikeSlcInds[fbrS];//slc; unsigned int idx2 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx3 = fbrIdx2[fbr]; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx0 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp = vals[x] * dU3[idx3 * R + r] * tmp_val;//2MR atomicAdd(&dU0[idx0 * R + r], tmp); } } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_loop(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE warpId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; // ITYPE blockId = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) //blockIdx.x ;// //like PARTI //hardcoded for 1 warp per nnz size_t num_loops_fbr = 1 * 32; size_t const fbr_per_loop = gridDim.x * blockDim.x; if(nFibers > fbr_per_loop) { num_loops_fbr = ((nFibers + fbr_per_loop - 1) / fbr_per_loop) << 5; } DTYPE tmp = 0, tmp_val; unsigned int fbr; for(size_t nl=0; nl<num_loops_fbr; ++nl) { fbr = (gId 
+ nl * fbr_per_loop) >> 5; if(fbr < nFibers - 1){ tmp_val = 0; unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; unsigned int idx1 = fbrLikeSlcInds[fbr];//slc; for(unsigned int r=laneId; r<R; r+=32) tmp = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for(unsigned int x = fbrPtr1[fbr] + warpId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) { unsigned int idx0 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val = vals[x] * tmp;///dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //2MR atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } } } int MTTKRP_COO_GPU(const Tensor &X, Matrix *U, const Options Opt){ //allocate and memcpy GPU memory //Tensor ITYPE mode = Opt.mode; ITYPE R = Opt.R; ITYPE *dInds0, *dInds1, *dInds2, *dInds3; DTYPE *dVals; ITYPE mode0 = X.modeOrder[0]; ITYPE mode1 = X.modeOrder[1]; ITYPE mode2 = X.modeOrder[2]; checkCuda(cudaMalloc((void**) &dVals, X.totNnz * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dInds0, X.totNnz * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dInds1, X.totNnz * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dInds2, X.totNnz * sizeof(ITYPE)), 0); checkCuda(cudaMemcpy(dVals, &(X.vals[0]), X.totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dInds0, &(X.inds[mode0][0]), X.totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dInds1, &(X.inds[mode1][0]), X.totNnz * sizeof(ITYPE) ,cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dInds2, &(X.inds[mode2][0]), X.totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); // //Matrices DTYPE *dU0, *dU1, *dU2, *dU3; checkCuda(cudaMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0); cudaMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(cudaMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); if(X.ndims == 4){ ITYPE mode3 = X.modeOrder[3]; checkCuda(cudaMalloc((void**) &dInds3, X.totNnz * sizeof(ITYPE)), 0); checkCuda(cudaMemcpy(dInds3, &(X.inds[mode3][0]), X.totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMalloc((void**) &dU3, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)), 0); checkCuda(cudaMemcpy(dU3, &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); } // BLOCK and GRID int BLOCKSIZE = 128; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float mili = 0; bool useLoop = true; // /* Like PARTI loop */ = if(useLoop) grid.x = 32768; else grid.x = (32 * X.totNnz + BLOCKSIZE - 1) / BLOCKSIZE; // CUDA call cuda_timer_start(start); if(!useLoop){ if(X.ndims == 3) mttkrp_COO_kernel<<<grid, block>>>(dVals, dInds0, dInds1, dInds2, X.totNnz, dU0, dU1, dU2, mode, R); else if(X.ndims == 4) mttkrp_COO_kernel_4D<<<grid, block>>>(dVals, dInds0, dInds1, dInds2, dInds3, X.totNnz, dU0, dU1, dU2, dU3, mode, R); } // /* loop like ParTI */ else{ if(X.ndims == 3) mttkrp_COO_kernel_loop<<<grid, block>>>(dVals, dInds0, dInds1, dInds2, X.totNnz, dU0, dU1, dU2, mode, R ); else if(X.ndims == 4) mttkrp_COO_kernel_4D_loop<<<grid, block>>>(dVals, dInds0, dInds1, dInds2, dInds3, X.totNnz, dU0, dU1, dU2, dU3, mode, R); } cuda_timer_stop(start, stop, mili); 
if(useLoop) cout << "Loop on. "; cout << "COO GPU using loop - time " << mili << "ms"<< endl; // check correctness checkCuda(cudaMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0); // print_output(U, 0); cudaFree(dVals); cudaFree(dU0); cudaFree(dU1); cudaFree(dU2); cudaFree(dU3); cudaFree(dInds0); cudaFree(dInds1); cudaFree(dInds2); cudaFree(dInds3); return 0; } int MTTKRP_HCSR_GPU(Tensor &X, Matrix *U, const Options &Opt){ //allocate and memcpy GPU memory cout << "FIX fiber idx" << endl; //Tensor ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin; DTYPE *dVals; int logOfWarpPerSlice = log2(Opt.warpPerSlice); int TbPerSlc = 1; int logOfTPS = log2(TbPerSlc); ITYPE mode0 = X.modeOrder[0]; ITYPE mode1 = X.modeOrder[1]; ITYPE mode2 = X.modeOrder[2]; // dummy bin mapper to be compatible with bin mapper when bin are not used X.slcMapperBin.push_back(std::vector<ITYPE>()); for (int s = 0; s < X.fbrIdx[0].size(); ++s) X.slcMapperBin[0].push_back(s); checkCuda(cudaMalloc((void**) &dVals, X.totNnz * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dSlcMapperBin, X.slcMapperBin[0].size() * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrIdx0, X.fbrIdx[0].size() * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrPtr0, X.fbrPtr[0].size() * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrPtr1, X.fbrPtr[1].size() * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrIdx1, X.fbrIdx[1].size() * sizeof(ITYPE)), 0); checkCuda(cudaMemcpy(dVals, &(X.vals[0]), X.totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dSlcMapperBin, &(X.slcMapperBin[0][0]), X.slcMapperBin[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrPtr0, &(X.fbrPtr[0][0]), X.fbrPtr[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrIdx0, &(X.fbrIdx[0][0]), X.fbrIdx[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrPtr1, &(X.fbrPtr[1][0]), X.fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrIdx1, &(X.fbrIdx[1][0]), X.fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); // //Matrices DTYPE *dU0, *dU1, *dU2, *dU3; checkCuda(cudaMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0); cudaMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(cudaMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); if(X.ndims == 3){ checkCuda(cudaMalloc((void**) &dInds2, X.totNnz * sizeof(ITYPE)), 0); checkCuda(cudaMemcpy(dInds2, &(X.inds[mode2][0]), X.totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); } if(X.ndims == 4){ ITYPE mode3 = X.modeOrder[3]; checkCuda(cudaMalloc((void**) &dFbrIdx2, X.fbrIdx[2].size() * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dFbrPtr2, X.fbrPtr[2].size() * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dInds3, X.totNnz * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dU3, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)), 0); checkCuda(cudaMemcpy(dFbrPtr2, &(X.fbrPtr[2][0]), X.fbrPtr[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); 
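/* Descriptive note on the CSF arrays staged in this function (comment only):
 * fbrPtr[0]/fbrIdx[0] delimit and name slices, fbrPtr[1]/fbrIdx[1] delimit and
 * name fibers, and for 4D tensors fbrPtr[2]/fbrIdx[2] add one more fiber level;
 * dInds2 (3D) or dInds3 (4D) hold the innermost-mode index of every nonzero.
 * A rough host-side traversal of the 3D layout, shown only to illustrate how the
 * HCSR kernels walk these arrays (it is not called anywhere):
 *     for (slc = 0; slc < X.fbrIdx[0].size(); ++slc)
 *       for (fbr = X.fbrPtr[0][slc]; fbr < X.fbrPtr[0][slc+1]; ++fbr)
 *         for (x = X.fbrPtr[1][fbr]; x < X.fbrPtr[1][fbr+1]; ++x)
 *           ; // nonzero x: (fbrIdx[0][slc], fbrIdx[1][fbr], inds[mode2][x]) maps to vals[x]
 */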
checkCuda(cudaMemcpy(dFbrIdx2, &(X.fbrIdx[2][0]), X.fbrIdx[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dInds3, &(X.inds[mode3][0]), X.totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dU3, &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); } // BLOCK and GRID int BLOCKSIZE = 512; if(Opt.warpPerSlice * 32 > BLOCKSIZE){ cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl; exit(0); } dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); grid.x = (Opt.warpPerSlice * 32 * X.dims[mode0] + BLOCKSIZE - 1) / BLOCKSIZE; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float mili = 0; checkCuda(cudaEventRecord(start), __LINE__); // mttkrp_HCSR_kernel_COO<<<grid, block, 32 * sizeof(DTYPE)>>>(dVals, dfbrIdx0, dSlcMapperBin, dInds2, dfbrPtr0, dfbrPtr1, dfbrIdx1, // X.fbrIdx[0].size(), dU0, dU1, dU2,Opt.mode, Opt.R, Opt.warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS); if(X.ndims == 3) mttkrp_HCSR_kernel_smllBin<<<grid, block, 32 * sizeof(DTYPE)>>>(dVals, dfbrIdx0, dSlcMapperBin, dInds2, dfbrPtr0, dfbrPtr1, dfbrIdx1, X.fbrIdx[0].size(), dU0, dU1, dU2,Opt.mode, Opt.R, Opt.warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS); else mttkrp_HCSR_kernel_smllBin_4D<<<grid, block, 32 * sizeof(DTYPE)>>>(dVals, dfbrIdx0, dSlcMapperBin, dInds3, dfbrPtr0, dfbrPtr1, dfbrIdx1, dFbrPtr2, dFbrIdx2, X.fbrIdx[0].size(), dU0, dU1, dU2, dU3, Opt.mode, Opt.R, Opt.warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS); checkCuda(cudaEventRecord(stop), __LINE__); cudaEventSynchronize(stop); checkCuda(cudaEventElapsedTime(&mili, start, stop), __LINE__); cudaDeviceSynchronize(); cout << "HCSR GPU - time " << mili << "ms"<< endl; // check correctness checkCuda(cudaMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0); cudaFree(dVals); cudaFree(dU0); cudaFree(dU1); cudaFree(dU2); cudaFree(dU3); cudaFree(dInds2); cudaFree(dInds3); cudaFree(dfbrIdx0); cudaFree(dfbrIdx1); cudaFree(dFbrIdx2); cudaFree(dfbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrPtr2); return 0; } int MTTKRP_TILED_COO_GPU(TiledTensor *TiledX, Matrix *U, const Options Opt){ //allocate and memcpy GPU memory //Tensor ITYPE mode = Opt.mode; ITYPE R = Opt.R; ITYPE *dInds0, *dInds1, *dInds2; ITYPE dLoc = 0, totNnz = 0; DTYPE *dVals; // All tile same mode ITYPE mode0 = TiledX[0].modeOrder[0]; ITYPE mode1 = TiledX[0].modeOrder[1]; ITYPE mode2 = TiledX[0].modeOrder[2]; for (int tile = 0; tile < Opt.nTile; ++tile) totNnz += TiledX[tile].totNnz; checkCuda(cudaMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dInds0, totNnz * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dInds1, totNnz * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0); for (int tile = 0; tile < Opt.nTile; ++tile){ if(tile > 0) dLoc += TiledX[tile-1].totNnz; checkCuda(cudaMemcpy(dVals + dLoc, &(TiledX[tile].vals[0]), TiledX[tile].totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dInds0 + dLoc, &(TiledX[tile].inds[mode0][0]), TiledX[tile].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dInds1 + dLoc, &(TiledX[tile].inds[mode1][0]), TiledX[tile].totNnz * sizeof(ITYPE) ,cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[tile].inds[mode2][0]), TiledX[tile].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); } // //Matrices DTYPE *dU0, *dU1, *dU2; 
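/* Factor-matrix note (descriptive comment; behavior unchanged): each U[m] is a
 * dense row-major nRows x R matrix flattened as vals[row * R + r]. The output
 * factor dU0 is zero-initialized on the device below, while dU1/dU2 are copied
 * from the host. As a sketch of the per-nonzero update the COO kernel launched
 * in this function is expected to perform (written to mirror the atomic updates
 * in the MIHCSR kernels above, not copied from the kernel body itself):
 *     for (r = 0; r < R; ++r)
 *       atomicAdd(&dU0[i * R + r], vals[x] * dU1[j * R + r] * dU2[k * R + r]);
 * where (i, j, k) are the mode-ordered indices of nonzero x in the current tile.
 */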
checkCuda(cudaMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0); cudaMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(cudaMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); // BLOCK and GRID int BLOCKSIZE = 128; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float mili = 0, GPUTime = 0; // CUDA call dLoc = 0; for (int tile = 0; tile < Opt.nTile; ++tile){ if(tile > 0) dLoc += TiledX[tile-1].totNnz; cout << "Tile " << tile << " launched.. "<<endl; grid.x = (32 * TiledX[tile].totNnz + BLOCKSIZE - 1) / BLOCKSIZE; checkCuda(cudaEventRecord(start), __LINE__); mttkrp_COO_kernel<<<grid, block>>>(dVals + dLoc, dInds0 + dLoc, dInds1 + dLoc, dInds2 + dLoc, TiledX[tile].totNnz, dU0, dU1, dU2, mode, R); checkCuda(cudaEventRecord(stop), __LINE__); cudaEventSynchronize(stop); checkCuda(cudaEventElapsedTime(&mili, start, stop), __LINE__); cudaDeviceSynchronize(); cout << "Tile: " << tile << " - time " << mili << "ms"<< endl; GPUTime += mili; } cout << "COO GPU - time " << GPUTime << "ms"<< endl; // check correctness checkCuda(cudaMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0); cudaFree(dVals); cudaFree(dU0); cudaFree(dU1); cudaFree(dU2); cudaFree(dInds0); cudaFree(dInds1); cudaFree(dInds2); return 0; } int MTTKRP_B_HCSR_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt){ /*choosing kernel type: false: B-CSF- IPDPS work, true: parallelism at fiber level, call slc_atomic_fbrlblpar function*/ bool slcAtomicFbrLvlPar = false; /* Allocate and memcpy GPU memory */ //Tensor ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin, *dFbrLikeSlcInds; DTYPE *dVals; ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0; ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0; // // All tile same mode ITYPE mode0 = TiledX[0].modeOrder[0]; ITYPE mode1 = TiledX[0].modeOrder[1]; ITYPE mode2 = TiledX[0].modeOrder[2]; ITYPE mode3 =((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ; for (int tile = 0; tile < Opt.nTile; ++tile){ totNnz += TiledX[tile].totNnz; totSlcPtr += TiledX[tile].fbrPtr[0].size() ; totSlcIdx += TiledX[tile].fbrIdx[0].size() ; totFbrPtr += TiledX[tile].fbrPtr[1].size() ; totFbrIdx += TiledX[tile].fbrIdx[1].size() ; totFbrPtr2 += ((TiledX[tile].ndims == 4) ? 
TiledX[tile].fbrPtr[2].size() : 0) ; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float memcpyTime = 0; cuda_timer_start(start); checkCuda(cudaMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 3) checkCuda(cudaMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 4){ checkCuda(cudaMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0); } /* cuda memcopy for tiled parts*/ for (int tile = 0; tile < Opt.nTile; ++tile){ if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); // all tile same dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[tile].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ; } checkCuda(cudaMemcpy(dVals + dLoc, &(TiledX[tile].vals[0]), TiledX[tile].totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[tile].fbrPtr[0][0]), TiledX[tile].fbrPtr[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[tile].fbrIdx[0][0]), TiledX[tile].fbrIdx[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[tile].fbrPtr[1][0]), TiledX[tile].fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[tile].fbrIdx[1][0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); if(slcAtomicFbrLvlPar) checkCuda(cudaMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[tile].fbrLikeSlcInds[0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); if(TiledX[tile].ndims == 3) checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[tile].inds[TiledX[tile].modeOrder[2]][0]), TiledX[tile].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); if(TiledX[tile].ndims == 4){ checkCuda(cudaMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[tile].fbrPtr[2][0]), TiledX[tile].fbrPtr[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[tile].fbrIdx[2][0]), TiledX[tile].fbrIdx[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dInds3 + dLoc, &(TiledX[tile].inds[mode3][0]), TiledX[tile].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); } dBinLoc = 0; for (int bin = 0; bin < Opt.nBin; ++bin){ if(bin > 0) dBinLoc += TiledX[tile].slcMapperBin[bin-1].size(); checkCuda(cudaMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(TiledX[tile].slcMapperBin[bin][0]), TiledX[tile].slcMapperBin[bin].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); } } cuda_timer_stop(start, stop, memcpyTime); cout << "Memcopy time " << memcpyTime << endl; // //Matrices DTYPE *dU0, *dU1, *dU2, *dU3; checkCuda(cudaMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dU1, U[mode1].nRows * 
U[mode1].nCols * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0); cudaMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(cudaMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); if(TiledX[0].ndims == 4){ checkCuda(cudaMalloc((void**) &dU3, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)), 0); checkCuda(cudaMemcpy(dU3, &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); } // BLOCK and GRID int BLOCKSIZE = 512; unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32); if(Opt.warpPerSlice * 32 > BLOCKSIZE){ cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl; exit(0); } cudaStream_t streams[Opt.nBin]; float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0; int smallBinEndsAt = 5; /* Warp per slice and threadblock per size */ int *warpPerSlc = new int[Opt.nBin]; int *logOfWarpPerSlc = new int[Opt.nBin]; int *TbPerSlc = new int[Opt.nBin]; int *logOfTbPerSlc = new int[Opt.nBin]; for (int bin = 0; bin < Opt.nBin ; ++bin){ TbPerSlc[bin] = 1; warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1); if(warpPerSlc[bin] > 16) warpPerSlc[bin] = 16; logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]); TbPerSlc[bin] = 1; logOfTbPerSlc[bin] = 0; if (bin >= smallBinEndsAt){ TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // 1st big bin starts with 1 TB 1 << 1 not 1 << 5 if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32; logOfTbPerSlc[bin] = log2(TbPerSlc[bin]); warpPerSlc[bin] = 16; logOfWarpPerSlc[bin] = 4; } } // TBD: change warpPerSlc to warpPerSlc[bin] and all int slcPerTb = 1; dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0; for (int bin = 0; bin < Opt.nBin; ++bin) cudaStreamCreate(&streams[bin]); /*MTTKRP on Opt.mode*/ int MTTKRPmode = mode0;//Opt.mode; for (int tile = 0; tile < Opt.nTile; ++tile){ dBinLoc = 0; if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ; } BLOCKSIZE = (( slcAtomicFbrLvlPar == true) ? 
Opt.TBsize : 512) ; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int smallBinEndsAt = 5; int slcPerTb = 0; // int warpPerFbr = BLOCKSIZE/32;//1;//Opt.warpPerSlice;//4;//; // int logOfWarpPerFbr = log2(warpPerFbr); // int bin = 0; // int fbrPerWarp = 1;//BLOCKSIZE/32; // dont overflow TB // int logOfFbrPerWarp = log2(fbrPerWarp); int warpPerFbr =Opt.warpPerSlice;//4;//; BLOCKSIZE/32;//1;// int logOfWarpPerFbr = log2(warpPerFbr); int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB int logOfFbrPerWarp = log2(fbrPerWarp ); grid.x = ( warpPerFbr * 32 * ((TiledX[tile].nFibers+fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE; double t0 = seconds(); cuda_timer_start(start); if(slcAtomicFbrLvlPar){ if(TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers, dU0, dU1, dU2, Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); else mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].nFibers, dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); } else{ for (int bin = 0; bin < Opt.nBin ; ++bin){ if(bin < smallBinEndsAt){ ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin]; dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0); grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) mttkrp_HCSR_kernel_smllBin<<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(), dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); else mttkrp_HCSR_kernel_smllBin_4D<<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } // Processing heavy bin.. 
multiple TB per slice else{ dBinLoc += TiledX[tile].slcMapperBin[bin-1].size(); grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) mttkrp_HCSR_kernel_hvyBin<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(), dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); else mttkrp_HCSR_kernel_hvyBin_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } } } cuda_timer_stop(start, stop, mili); CPUtimer += seconds() - t0; GPUTime += mili; if(Opt.verbose){ cout << "Tile: " << tile << " - time: " << mili << "ms"; cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: " << TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " "; cout << endl; } } allModeGPUTime += GPUTime; cout << "B-CSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl; for (int bin = 0; bin < Opt.nBin; ++bin) cudaStreamDestroy(streams[bin]); // check correctness checkCuda(cudaMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0); cudaFree(dVals); cudaFree(dU0); cudaFree(dU1); cudaFree(dU2); cudaFree(dU3); cudaFree(dfbrIdx0); cudaFree(dInds2); cudaFree(dInds3); cudaFree(dfbrIdx0); cudaFree(dfbrIdx1); cudaFree(dFbrIdx2); cudaFree(dfbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrPtr2); cudaFree(dFbrLikeSlcInds); return 0; } int MTTKRP_B_HCSR_GPU_ANYMODE(TiledTensor *TiledX, Matrix *U, const Options &Opt, int mode){ /* Allocate and memcpy GPU memory */ //Tensor ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin, *dFbrLikeSlcInds; DTYPE *dVals; ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0; ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0; // // All tile same mode ITYPE mode0 = 0;//TiledX[0].modeOrder[0]; ITYPE mode1 = 1;//TiledX[0].modeOrder[1]; ITYPE mode2 = 2;//TiledX[0].modeOrder[2]; ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ; for (int tile = 0; tile < Opt.nTile; ++tile){ totNnz += TiledX[tile].totNnz; totSlcPtr += TiledX[tile].fbrPtr[0].size() ; totSlcIdx += TiledX[tile].fbrIdx[0].size() ; totFbrPtr += TiledX[tile].fbrPtr[1].size() ; totFbrIdx += TiledX[tile].fbrIdx[1].size() ; totFbrPtr2 += ((TiledX[tile].ndims == 4) ? 
TiledX[tile].fbrPtr[2].size() : 0) ; } double t0 = seconds(); checkCuda(cudaMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 3) checkCuda(cudaMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 4){ checkCuda(cudaMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0); } /* cuda memcopy for tiled parts*/ for (int tile = 0; tile < Opt.nTile; ++tile){ if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); // all tile same dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[tile].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ; } checkCuda(cudaMemcpy(dVals + dLoc, &(TiledX[tile].vals[0]), TiledX[tile].totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[tile].fbrPtr[0][0]), TiledX[tile].fbrPtr[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[tile].fbrIdx[0][0]), TiledX[tile].fbrIdx[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[tile].fbrPtr[1][0]), TiledX[tile].fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[tile].fbrIdx[1][0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); if(TiledX[tile].ndims == 3) checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[tile].inds[TiledX[tile].modeOrder[2]][0]), TiledX[tile].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); if(TiledX[tile].ndims == 4){ checkCuda(cudaMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[tile].fbrPtr[2][0]), TiledX[tile].fbrPtr[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[tile].fbrIdx[2][0]), TiledX[tile].fbrIdx[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dInds3 + dLoc, &(TiledX[tile].inds[TiledX[tile].modeOrder[3]][0]), TiledX[tile].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); } dBinLoc = 0; for (int bin = 0; bin < Opt.nBin; ++bin){ if(bin > 0) dBinLoc += TiledX[tile].slcMapperBin[bin-1].size(); checkCuda(cudaMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(TiledX[tile].slcMapperBin[bin][0]), TiledX[tile].slcMapperBin[bin].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); } } t0 = seconds(); unsigned int *dULoc = new unsigned int[TiledX[0].ndims]; unsigned int *szDU = new unsigned int[TiledX[0].ndims]; // //Matrices DTYPE *dU;// *dU0, *dU1, *dU2, *dU3; ITYPE mtxSize = ((TiledX[0].ndims == 3) ? 
(U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols : (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols ); checkCuda(cudaMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), 0); for (int m = 0; m < TiledX[0].ndims; ++m) szDU[m] = U[m].nRows * U[m].nCols; ITYPE mtxLoc = 0; for (int m = 0; m < mode; ++m) mtxLoc += szDU[m]; checkCuda(cudaMemcpy(dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); if(TiledX[0].ndims == 4) checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); cudaMemset(dU + mtxLoc, 0, U[mode].nRows * U[mode0].nCols * sizeof(DTYPE)); // BLOCK and GRID int BLOCKSIZE = 512; unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32); if(Opt.warpPerSlice * 32 > BLOCKSIZE){ cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl; exit(0); } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaStream_t streams[Opt.nBin]; float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0; int smallBinEndsAt = 5; /* Warp per slice and threadblock per size */ int *warpPerSlc = new int[Opt.nBin]; int *logOfWarpPerSlc = new int[Opt.nBin]; int *TbPerSlc = new int[Opt.nBin]; int *logOfTbPerSlc = new int[Opt.nBin]; for (int bin = 0; bin < Opt.nBin ; ++bin){ TbPerSlc[bin] = 1; warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1); if(warpPerSlc[bin] > 16) warpPerSlc[bin] = 16; logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]); TbPerSlc[bin] = 1; logOfTbPerSlc[bin] = 0; if (bin >= smallBinEndsAt){ TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // 1st big bin starts with 1 TB 1 << 1 not 1 << 5 if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32; logOfTbPerSlc[bin] = log2(TbPerSlc[bin]); warpPerSlc[bin] = 16; logOfWarpPerSlc[bin] = 4; } } // TBD: change warpPerSlc to warpPerSlc[bin] and all int slcPerTb = 1; dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0; for (int bin = 0; bin < Opt.nBin; ++bin) cudaStreamCreate(&streams[bin]); /*MTTKRP on Opt.mode*/ int MTTKRPmode = mode;//Opt.mode; for (int tile = 0; tile < Opt.nTile; ++tile){ /* matrix order according to mode order*/ for (int mm = 0; mm < TiledX[0].ndims; ++mm){ int curMode = TiledX[tile].modeOrder[mm]; dULoc[mm] = 0; for (int q = 0; q < curMode; ++q) dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0 } dBinLoc = 0; if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ; } // BLOCKSIZE = (( slcAtomicFbrLvlPar == true) ? 
Opt.TBsize : 512) ; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int smallBinEndsAt = 5; int slcPerTb = 0; // int warpPerFbr = BLOCKSIZE/32;//1;//Opt.warpPerSlice;//4;//; // int logOfWarpPerFbr = log2(warpPerFbr); // int bin = 0; // int fbrPerWarp = 1;//BLOCKSIZE/32; // dont overflow TB // int logOfFbrPerWarp = log2(fbrPerWarp); int warpPerFbr =Opt.warpPerSlice;//4;//; BLOCKSIZE/32;//1;// int logOfWarpPerFbr = log2(warpPerFbr); int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB int logOfFbrPerWarp = log2(fbrPerWarp ); double t0 = seconds(); cuda_timer_start(start); if(mode == TiledX[0].modeOrder[0]){ for (int bin = 0; bin < Opt.nBin ; ++bin){ if(bin < smallBinEndsAt){ ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin]; dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0); grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) mttkrp_HCSR_kernel_smllBin<<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); else mttkrp_HCSR_kernel_smllBin_4D<<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } // Processing heavy bin.. multiple TB per slice else{ dBinLoc += TiledX[tile].slcMapperBin[bin-1].size(); grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) mttkrp_HCSR_kernel_hvyBin<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); else mttkrp_HCSR_kernel_hvyBin_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } } } else if(TiledX[0].ndims == 4 && TiledX[0].modeOrder[1] == MTTKRPmode && TiledX[0].totNnz){ for (int bin = 0; bin < Opt.nBin ; ++bin){ if(bin < smallBinEndsAt){ ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin]; dBinLoc += ((bin > 0) ? 
TiledX[tile].slcMapperBin[bin-1].size() : 0); grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; mttkrp_MIHCSR_kernel_smllBin_fbrS_atomic_4D<<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } // Processing heavy bin.. multiple TB per slice else{ dBinLoc += TiledX[tile].slcMapperBin[bin-1].size(); grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; mttkrp_MIHCSR_kernel_hvyBin_fbrS_atomic_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } } } else if(mode == TiledX[0].modeOrder[TiledX[0].ndims-2]){ for (int bin = 0; bin < Opt.nBin ; ++bin){ if(bin < smallBinEndsAt){ ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin]; dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0); grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_smllBin_fbr_atomic<<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); else mttkrp_MIHCSR_kernel_smllBin_fbr_atomic_4D<<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } // Processing heavy bin.. 
multiple TB per slice else{ dBinLoc += TiledX[tile].slcMapperBin[bin-1].size(); grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_hvyBin_fbr_atomic<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); else mttkrp_MIHCSR_kernel_hvyBin_fbr_atomic_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } } } else if(mode == TiledX[0].modeOrder[TiledX[0].ndims-1]){ for (int bin = 0; bin < Opt.nBin ; ++bin){ if(bin < smallBinEndsAt){ ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin]; dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0); grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_smllBin_all_atomic <<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); else mttkrp_MIHCSR_kernel_smllBin_all_atomic_4D<<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } // Processing heavy bin.. 
multiple TB per slice else{ dBinLoc += TiledX[tile].slcMapperBin[bin-1].size(); grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_hvyBin_all_atomic<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); else mttkrp_MIHCSR_kernel_hvyBin_all_atomic_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(), dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]); } } } cuda_timer_stop(start, stop, mili); CPUtimer += seconds() - t0; GPUTime += mili; // if(Opt.verbose) { cout << "Tile: " << tile << " - time: " << mili << "ms"; if(TiledX[0].ndims == 3){ cout << " nSlc: " << TiledX[tile].fbrIdx[0].size() << ", nFibers: " << TiledX[tile].fbrPtr[1].size() <<", nnz: " << TiledX[tile].totNnz; cout << endl; } else if(TiledX[0].ndims == 4){ cout << " nSlc: " << TiledX[tile].fbrIdx[0].size() << ", nSFibers: " << TiledX[tile].fbrPtr[1].size() << ", nFibers: " << TiledX[tile].fbrPtr[2].size() <<", nnz: " << TiledX[tile].totNnz; cout << endl; } } } allModeGPUTime += GPUTime; cout << "ONE-B-CSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl; for (int bin = 0; bin < Opt.nBin; ++bin) cudaStreamDestroy(streams[bin]); // check correctness checkCuda(cudaMemcpy(&U[mode].vals[0], dU + mtxLoc, U[mode].nRows * U[mode].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0); cudaFree(dVals); cudaFree(dU); //cudaFree(dU1); cudaFree(dU2); cudaFree(dU3); cudaFree(dfbrIdx0); cudaFree(dInds2); cudaFree(dInds3); cudaFree(dfbrIdx0); cudaFree(dfbrIdx1); cudaFree(dFbrIdx2); cudaFree(dfbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrPtr2); cudaFree(dFbrLikeSlcInds); return 0; } int MTTKRP_HYB_GPU(const HYBTensor &HybX, Matrix *U, const Options &Opt){ //allocate and memcpy GPU memory //Tensor ITYPE *dCOOInds0, *dCOOInds1, *dCOOInds2, *dCOOInds3; ITYPE *dCSLSlcPtr, *dCSLSlcInds, *dCSLInds1, *dCSLInds2, *dCSLSlcMapperBin; ITYPE *dfbrPtr0, *dfbrIdx0, *dInds2, *dInds3, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin; DTYPE *dVals, *dCOOVals, *dCSLVals; ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dCSLBinLoc = 0, dFbrLoc2 =0; int warpPerSlice = Opt.warpPerSlice; int logOfWarpPerSlice = log2(Opt.warpPerSlice); int TbPerSlc = 1; int logOfTPS = log2(TbPerSlc); // All tile same mode ITYPE mode0 = HybX.modeOrder[0]; ITYPE mode1 = HybX.modeOrder[1]; ITYPE mode2 = HybX.modeOrder[2]; ITYPE mode3 =((HybX.ndims == 4) ? 
HybX.modeOrder[3] : 0) ; // ****** mem op HYB COO ******* if(HybX.COOnnz > 0){ checkCuda(cudaMalloc((void**) &dCOOVals, HybX.COOnnz * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dCOOInds0, HybX.COOnnz * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dCOOInds1, HybX.COOnnz * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dCOOInds2, HybX.COOnnz * sizeof(ITYPE)), 0); checkCuda(cudaMemcpy(dCOOVals, &(HybX.COOvals[0]), HybX.COOnnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dCOOInds0, &(HybX.COOinds[mode0][0]), HybX.COOnnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dCOOInds1, &(HybX.COOinds[mode1][0]), HybX.COOnnz * sizeof(ITYPE) ,cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dCOOInds2, &(HybX.COOinds[mode2][0]), HybX.COOnnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); if(HybX.ndims == 4){ checkCuda(cudaMalloc((void**) &dCOOInds3, HybX.COOnnz * sizeof(ITYPE)), 0); checkCuda(cudaMemcpy(dCOOInds3, &(HybX.COOinds[mode3][0]), HybX.COOnnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); } } // ****** mem op HYB CSL ******* if(HybX.CSLnnz > 0){ checkCuda(cudaMalloc((void**) &dCSLVals, HybX.CSLnnz * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dCSLSlcPtr, HybX.CSLslicePtr.size() * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dCSLSlcInds, HybX.CSLsliceIdx.size() * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dCSLInds1, HybX.CSLnnz * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dCSLInds2, HybX.CSLnnz * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dCSLSlcMapperBin, HybX.CSLslicePtr.size() * sizeof(ITYPE)), 0); checkCuda(cudaMemcpy(dCSLVals, &(HybX.CSLvals[0]), HybX.CSLnnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dCSLSlcPtr + dSlcLoc, &(HybX.CSLslicePtr[0]), HybX.CSLslicePtr.size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dCSLSlcInds + dSlcIdxLoc, &(HybX.CSLsliceIdx[0]), HybX.CSLsliceIdx.size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dCSLInds1, &(HybX.CSLinds[mode1][0]), HybX.CSLnnz * sizeof(ITYPE) ,cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dCSLInds2, &(HybX.CSLinds[mode2][0]), HybX.CSLnnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); dCSLBinLoc = 0; for (int bin = 0; bin < Opt.nBin; ++bin){ if(bin > 0) dCSLBinLoc += HybX.CSLslcMapperBin[bin-1].size(); if(HybX.CSLslcMapperBin[bin].size() > 0) checkCuda(cudaMemcpy(dCSLSlcMapperBin + dSlcIdxLoc + dCSLBinLoc, &(HybX.CSLslcMapperBin[bin][0]), HybX.CSLslcMapperBin[bin].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); } } // ****** mem op HYB HCSR ******* if(HybX.HCSRnnz > 0){ checkCuda(cudaMalloc((void**) &dVals, HybX.HCSRnnz * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrPtr0, HybX.fbrPtr[0].size() * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrIdx0, HybX.fbrIdx[0].size() * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dSlcMapperBin, HybX.fbrPtr[0].size() * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrPtr1, HybX.fbrPtr[1].size() * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrIdx1, HybX.fbrPtr[1].size() * sizeof(ITYPE)), 0); checkCuda(cudaMemcpy(dVals, &(HybX.vals[0]), HybX.HCSRnnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrPtr0, &(HybX.fbrPtr[0][0]), HybX.fbrPtr[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrIdx0, &(HybX.fbrIdx[0][0]), HybX.fbrIdx[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrPtr1, &(HybX.fbrPtr[1][0]), HybX.fbrPtr[1].size() * 
sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrIdx1, &(HybX.fbrIdx[1][0]), HybX.fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); if(HybX.ndims == 3){ checkCuda(cudaMalloc((void**) &dInds2, HybX.HCSRnnz * sizeof(ITYPE)), 0); checkCuda(cudaMemcpy(dInds2, &(HybX.inds[mode2][0]), HybX.HCSRnnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); } if(HybX.ndims == 4){ checkCuda(cudaMalloc((void**) &dFbrIdx2, HybX.fbrIdx[2].size() * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dFbrPtr2, HybX.fbrPtr[2].size() * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dInds3, HybX.HCSRnnz * sizeof(ITYPE)), 0); checkCuda(cudaMemcpy(dFbrPtr2, &(HybX.fbrPtr[2][0]), HybX.fbrPtr[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dFbrIdx2, &(HybX.fbrIdx[2][0]), HybX.fbrIdx[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dInds3, &(HybX.inds[mode3][0]), HybX.HCSRnnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); } dBinLoc = 0; for (int bin = 0; bin < Opt.nBin; ++bin){ if(bin > 0) dBinLoc += HybX.slcMapperBin[bin-1].size(); if(HybX.slcMapperBin[bin].size() > 0) checkCuda(cudaMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(HybX.slcMapperBin[bin][0]), HybX.slcMapperBin[bin].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); } } // //Matrices DTYPE *dU0, *dU1, *dU2, *dU3; checkCuda(cudaMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0); cudaMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(cudaMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); if(HybX.ndims == 4){ checkCuda(cudaMalloc((void**) &dU3, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)), 0); checkCuda(cudaMemcpy(dU3, &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); } // BLOCK and GRID int BLOCKSIZE = 512; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32); if(Opt.warpPerSlice * 32 > BLOCKSIZE){ cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." 
<< endl; exit(0); } cudaEvent_t start, stop, HYBstart, HYBstop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventCreate(&HYBstart); cudaEventCreate(&HYBstop); cudaStream_t streams[2 * Opt.nBin + 1]; for (int bin = 0; bin < 2 * Opt.nBin + 1; ++bin) cudaStreamCreate(&streams[bin]); float mili = 0, HYBmili =0, GPUTime = 0, CPUtimer = 0, HYBTime = 0; dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0; bool useLoop = false; if(useLoop) grid.x = 32768*2; // mili = 0; dCSLBinLoc = 0; dBinLoc = 0; int smallBinEndsAt = 5; int slcPerTb = 0; cuda_timer_start(HYBstart); // ******* CUDA COO ******* // if(HybX.COOnnz > 0){ // BLOCKSIZE = 128; // block.x = BLOCKSIZE; // // /* Like PARTI loop */ = // if(!useLoop) // grid.x = (32 * HybX.COOnnz + BLOCKSIZE - 1) / BLOCKSIZE; // if(Opt.verbose) // cuda_timer_start(start); // if(!useLoop){ // if(HybX.ndims == 3) // mttkrp_HYB_COO_kernel<<<grid, block, 0, 0>>>(dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2, HybX.COOnnz, dU0, dU1, dU2, Opt.mode, Opt.R); // else if (HybX.ndims == 4) // mttkrp_HYB_COO_kernel_4D<<<grid, block, 0, 0>>>(dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2,dCOOInds3, HybX.COOnnz, dU0, dU1, dU2, dU3, Opt.mode, Opt.R); // } // else{ // if(HybX.ndims == 3) // mttkrp_HYB_COO_kernel_loop<<<grid, block, 0, 0>>>(dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2, HybX.COOnnz, dU0, dU1, dU2, Opt.mode, Opt.R); // else if (HybX.ndims == 4) // mttkrp_HYB_COO_kernel_4D_loop<<<grid, block, 0, 0>>>(dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2,dCOOInds3, HybX.COOnnz, dU0, dU1, dU2, dU3, Opt.mode, Opt.R); // } // if(Opt.verbose){ // cuda_timer_stop(start, stop, mili); // HYBTime += mili; // cout << "HYB-COO GPU " << mili << "ms"<< endl; // } // } // ******* CUDA CSL ******* // if(HybX.CSLnnz > 0 || HybX.HCSRnnz > 0) { if(HybX.COOnnz > 0){ BLOCKSIZE = 128; block.x = 128; grid.x = (32 * HybX.COOnnz + BLOCKSIZE - 1) / BLOCKSIZE; if(HybX.ndims == 3) mttkrp_HYB_COO_kernel<<<grid, block, 0, 0>>>(dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2, HybX.COOnnz, dU0, dU1, dU2, Opt.mode, Opt.R); else if (HybX.ndims == 4) mttkrp_HYB_COO_kernel_4D<<<grid, block, 0, 0>>>(dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2,dCOOInds3, HybX.COOnnz, dU0, dU1, dU2, dU3, Opt.mode, Opt.R); } BLOCKSIZE = 512; block.x = BLOCKSIZE; for (int bin = 0; bin < Opt.nBin ; ++bin){ dBinLoc += ((bin > 0) ? HybX.slcMapperBin[bin-1].size() : 0); dCSLBinLoc += ((bin > 0) ? HybX.CSLslcMapperBin[bin-1].size() : 0); if( HybX.slcMapperBin[bin].size() == 0 && HybX.CSLslcMapperBin[bin].size() == 0) continue; // Processing small bin.. merged to one. 
1 WARP slice if(bin < smallBinEndsAt){ warpPerSlice = 1; logOfWarpPerSlice = 0;//log2(warpPerSlice); slcPerTb = 16 / warpPerSlice; /* CSL small bin */ if(HybX.CSLnnz > 0){ grid.x = ( warpPerSlice * 32 * HybX.CSLslcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; mttkrp_CSL_kernel_bin<<<grid, block, 0, streams[1]>>>(dCSLVals, dCSLSlcInds, dCSLSlcMapperBin + dCSLBinLoc, dCSLInds2, dCSLSlcPtr, dCSLInds1, HybX.CSLslcMapperBin[bin].size(), dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice); } /* HCSR small bin */ if(HybX.HCSRnnz > 0){ grid.x = ( warpPerSlice * 32 * HybX.slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(HybX.ndims == 3) mttkrp_HCSR_kernel_smllBin<<<grid, block, 0, streams[2]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, HybX.slcMapperBin[bin].size(), dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS); else if(HybX.ndims == 4) mttkrp_HCSR_kernel_smllBin_4D<<<grid, block, 0, streams[2]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, HybX.slcMapperBin[bin].size(), dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS); } } // Processing heavy bin.. multiple TB per slice else{ TbPerSlc = 1 << (bin - smallBinEndsAt + 1); // 1st big bin starts with 1 TB 1 << 1 not 1 << 5 if(TbPerSlc > 32) TbPerSlc = 32; logOfTPS = log2(TbPerSlc); warpPerSlice = 16; logOfWarpPerSlice = 4; /* CSL big bin */ if(HybX.CSLnnz > 0){ grid.x = (TbPerSlc * warpPerSlice * 32 * HybX.CSLslcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; mttkrp_CSL_kernel_hvyBin<<<grid, block, 0, streams[bin+1]>>>(dCSLVals + dLoc, dCSLSlcInds + dSlcIdxLoc, dCSLSlcMapperBin + dSlcIdxLoc + dCSLBinLoc, dCSLInds2 + dLoc, dCSLSlcPtr + dSlcLoc, dCSLInds1, HybX.CSLslcMapperBin[bin].size(), dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS); } /* HCSR big bin */ if(HybX.HCSRnnz > 0){ grid.x = (TbPerSlc * warpPerSlice * 32 * HybX.slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE; if(HybX.ndims == 3) mttkrp_HCSR_kernel_hvyBin<<<grid, block, 0, streams[bin+2]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, HybX.slcMapperBin[bin].size(), dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS); else if(HybX.ndims == 4) mttkrp_HCSR_kernel_hvyBin_4D<<<grid, block, 0, streams[bin + 2]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, HybX.slcMapperBin[bin].size(), dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS); } } } // if(Opt.verbose){ // cuda_timer_stop(start, stop, mili); // HYBTime += mili; // cout << "CSL+HCSR GPU-time: " << mili << "ms"<< endl; // } } cuda_timer_stop(HYBstart, HYBstop, HYBmili); if(Opt.verbose) cout << "verbose on. 
HYB GPU: " << HYBmili << endl; else cout << "HYB GPU: " << HYBmili << endl; for (int bin = 0; bin < 2 * Opt.nBin + 1; ++bin) cudaStreamDestroy(streams[bin]); // check correctness checkCuda(cudaMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0); cudaFree(dVals); cudaFree(dCOOVals); cudaFree(dCSLVals); cudaFree(dU0); cudaFree(dU1); cudaFree(dU2); cudaFree(dfbrIdx0); cudaFree(dInds2); cudaFree(dInds3); cudaFree(dfbrIdx0); cudaFree(dfbrIdx1); cudaFree(dFbrIdx2); cudaFree(dfbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrPtr2); cudaFree(dCSLInds1); cudaFree(dCSLInds2); cudaFree(dCSLSlcPtr); cudaFree(dCSLSlcInds); cudaFree(dCOOInds0); cudaFree(dCOOInds1); cudaFree(dCOOInds2); return 0; } int MTTKRP_ONE_HCSR_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt){ bool performMTTKRPMode = true, performMTTKRPnMode = true, performMTTKRPnnMode = true; /* Allocate and memcpy GPU memory */ //Tensor ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin, *dFbrLikeSlcInds; DTYPE *dVals; ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0, dFbrLikeSlcIndsLoc = 0; ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0; // // All tile same mode ITYPE mode0 = 0;//TiledX[0].modeOrder[0]; ITYPE mode1 = 1;//TiledX[0].modeOrder[1]; ITYPE mode2 = 2;//TiledX[0].modeOrder[2]; ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ; ITYPE R = Opt.R; for (int tile = 0; tile < Opt.nTile; ++tile){ totNnz += TiledX[tile].totNnz; totSlcPtr += TiledX[tile].fbrPtr[0].size() ; totSlcIdx += TiledX[tile].fbrIdx[0].size() ; totFbrPtr += TiledX[tile].fbrPtr[1].size() ; totFbrIdx += TiledX[tile].fbrIdx[1].size() ; totFbrPtr2 += ((TiledX[tile].ndims == 4) ? TiledX[tile].fbrPtr[2].size() : 0) ; } double t0 = seconds(); checkCuda(cudaMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 3) checkCuda(cudaMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 4){ checkCuda(cudaMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0); } /* cuda memcopy for tiled parts*/ for (int tile = 0; tile < Opt.nTile; ++tile){ if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); // all tile same dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[tile].ndims == 4) ? 
TiledX[tile - 1].fbrPtr[2].size() : 0) ; } checkCuda(cudaMemcpy(dVals + dLoc, &(TiledX[tile].vals[0]), TiledX[tile].totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[tile].fbrPtr[0][0]), TiledX[tile].fbrPtr[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[tile].fbrIdx[0][0]), TiledX[tile].fbrIdx[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[tile].fbrPtr[1][0]), TiledX[tile].fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[tile].fbrIdx[1][0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[tile].fbrLikeSlcInds[0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); if(TiledX[tile].ndims == 3) checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[tile].inds[TiledX[tile].modeOrder[2]][0]), TiledX[tile].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); if(TiledX[tile].ndims == 4){ checkCuda(cudaMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[tile].fbrPtr[2][0]), TiledX[tile].fbrPtr[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[tile].fbrIdx[2][0]), TiledX[tile].fbrIdx[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dInds3 + dLoc, &(TiledX[tile].inds[TiledX[0].modeOrder[3]][0]), TiledX[tile].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); } dBinLoc = 0; for (int bin = 0; bin < Opt.nBin; ++bin){ if(bin > 0) dBinLoc += TiledX[tile].slcMapperBin[bin-1].size(); checkCuda(cudaMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(TiledX[tile].slcMapperBin[bin][0]), TiledX[tile].slcMapperBin[bin].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); } } float tnsMemcpyTime = seconds() - t0; t0 = seconds(); unsigned int *dULoc = new unsigned int[TiledX[0].ndims]; unsigned int *szDU = new unsigned int[TiledX[0].ndims]; // //Matrices DTYPE *dU;// *dU0, *dU1, *dU2, *dU3; ITYPE mtxSize = ((TiledX[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols : (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols ); checkCuda(cudaMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), 0); for (int m = 0; m < TiledX[0].ndims; ++m) szDU[m] = U[m].nRows * U[m].nCols; cudaMemset(dU+0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(cudaMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); float mtxMemcpyTime = seconds() - t0; // cout << "tns and mtx memcopy time: " << tnsMemcpyTime <<", " << mtxMemcpyTime<< endl; if(TiledX[0].ndims == 4) checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); // BLOCK and GRID int BLOCKSIZE = 512; unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32); // if(Opt.warpPerSlice * 32 > BLOCKSIZE){ // cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." 
<< endl; // exit(0); // } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaStream_t streams[Opt.nBin]; float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0; int smallBinEndsAt = 5; /* Warp per slice and threadblock per size */ int *warpPerSlc = new int[Opt.nBin]; int *logOfWarpPerSlc = new int[Opt.nBin]; int *TbPerSlc = new int[Opt.nBin]; int *logOfTbPerSlc = new int[Opt.nBin]; for (int bin = 0; bin < Opt.nBin ; ++bin){ TbPerSlc[bin] = 1; warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1); if(warpPerSlc[bin] > 16) warpPerSlc[bin] = 16; logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]); TbPerSlc[bin] = 1; logOfTbPerSlc[bin] = 0; if (bin >= smallBinEndsAt){ TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // 1st big bin starts with 1 TB 1 << 1 not 1 << 5 if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32; logOfTbPerSlc[bin] = log2(TbPerSlc[bin]); warpPerSlc[bin] = 16; logOfWarpPerSlc[bin] = 4; } } // TBD: change warpPerSlc to warpPerSlc[bin] and all int slcPerTb = 1; dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0; for (int bin = 0; bin < Opt.nBin; ++bin) cudaStreamCreate(&streams[bin]); /*MTTKRP on Opt.mode*/ unsigned int dU0Loc, dU1Loc, dU2Loc , dU3Loc; /* matrix order according to mode order*/ for (int m = 0; m < TiledX[0].ndims; ++m){ int curMode = TiledX[0].modeOrder[m]; dULoc[m] = 0; for (int q = 0; q < curMode; ++q){ dULoc[m] += szDU[q % TiledX[0].ndims]; //1 2 3 0 } } for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode){ if(MTTKRPmode > 0){ mili = 0; GPUTime = 0; CPUtimer = 0; dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0; // MTTKRP on mode mode 0 changed DU0. To pass correctness for now initializing to 2 again. int mode = MTTKRPmode - 1; for(long r = 0; r < U[mode].nRows; ++r){ for(long c = 0; c < U[mode].nCols; ++c) // or u[mode].nCols U[mode].vals[r * U[mode].nCols + c] = mode + .5;// 0.1 * drand48(); //1 ;//(r * R + c + 1); // } if(MTTKRPmode == 1){ checkCuda(cudaMemcpy(dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); cudaMemset(dU + szDU[0], 0, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)); } else if(MTTKRPmode == 2){ checkCuda(cudaMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); cudaMemset(dU + szDU[0] + szDU[1], 0, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)); } else if(MTTKRPmode == 3){ checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1] , &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); cudaMemset(dU + szDU[0] + szDU[1] + szDU[2], 0, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)); } } if(performMTTKRPMode && TiledX[0].modeOrder[0] == MTTKRPmode){ // if(Opt.verbose) cout << "Slc atomics - " ; for (int tile = 0; tile < Opt.nTile; ++tile){ dBinLoc = 0; if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? 
TiledX[tile - 1].fbrPtr[2].size() : 0) ; } BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int smallBinEndsAt = 5; int slcPerTb = 0; int warpPerFbr =Opt.warpPerSlice;//4;//; BLOCKSIZE/32;//1;// int logOfWarpPerFbr = log2(warpPerFbr); int bin = 0; bool useLoop = false; int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB int logOfFbrPerWarp = log2(fbrPerWarp ); // int fbrPerWarp = 1;//BLOCKSIZE/32; // dont overflow TB // int logOfFbrPerWarp = log2(fbrPerWarp ); if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){ cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!" << endl << "hint: increase -b!" << endl; exit(0); } /* Like PARTI loop */ if(useLoop) grid.x = Opt.gridSize;// 32768*16; else grid.x = ( warpPerFbr * 32 * ((TiledX[tile].nFibers+fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE; double t0 = seconds(); cuda_timer_start(start); if(TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); else if(TiledX[0].ndims == 4) mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); cuda_timer_stop(start, stop, mili); CPUtimer += seconds() - t0; GPUTime += mili; if(Opt.verbose){ cout << "Tile: " << tile << " - time: " << mili << "ms"; cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: " << TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " "; cout << endl; } } allModeGPUTime += GPUTime; cout << "singleCSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl; } /*processing fbrS level for 4D tensor*/ else if(TiledX[0].ndims == 4 && performMTTKRPnMode && TiledX[0].modeOrder[1] == MTTKRPmode){ // if(Opt.verbose) cout << "FbrS atomics - " ; mili = 0, GPUTime = 0, CPUtimer = 0; dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0; for (int tile = 0; tile < Opt.nTile; ++tile){ dBinLoc = 0; if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ; } // cout <<"might wanna change binning style and Block size, logWPC, COO like parallelism, allow mode sort" << endl; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int smallBinEndsAt = 5; int slcPerTb = 0; int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;// if(warpPerFbr > (BLOCKSIZE/32)){ cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" 
<< endl; exit(0); } int logOfWarpPerFbr = log2(warpPerFbr); int bin = 0; grid.x = ( warpPerFbr * 32 * TiledX[tile].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; double t0 = seconds(); cuda_timer_start(start); mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); cuda_timer_stop(start, stop, mili); CPUtimer += seconds() - t0; GPUTime += mili; if(Opt.verbose){ cout << "Tile: " << tile << " - time: " << mili << "ms"; cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: " << TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " "; cout << endl; } } allModeGPUTime += GPUTime; cout << "singleCSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl; } else if(performMTTKRPnMode && TiledX[0].modeOrder[TiledX[0].ndims-2] == MTTKRPmode){ // if(Opt.verbose) cout << "Fbr atomics - " ; mili = 0, GPUTime = 0, CPUtimer = 0; dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0; for (int tile = 0; tile < Opt.nTile; ++tile){ dBinLoc = 0; if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ; } // cout <<"might wanna change binning style and Block size, logWPC, COO like parallelism, allow mode sort" << endl; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int smallBinEndsAt = 5; int slcPerTb = 0; int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;// if(warpPerFbr > (BLOCKSIZE/32)){ cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" 
<< endl; exit(0); } int logOfWarpPerFbr = log2(warpPerFbr); int bin = 0; bool useLoop = false; // /* Like PARTI loop */ = if(useLoop) grid.x = Opt.gridSize;// 32768*16; else grid.x = ( warpPerFbr * 32 * TiledX[tile].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; double t0 = seconds(); cuda_timer_start(start); if(useLoop) mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_loop<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else{ if(TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } cuda_timer_stop(start, stop, mili); CPUtimer += seconds() - t0; GPUTime += mili; if(Opt.verbose){ cout << "Tile: " << tile << " - time: " << mili << "ms"; cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: " << TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " "; cout << endl; } } allModeGPUTime += GPUTime; cout << "singleCSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl; } else if(performMTTKRPnnMode && TiledX[0].modeOrder[TiledX[0].ndims-1] == MTTKRPmode){ // if(Opt.verbose) cout << "Nnz atomics - " ; mili = 0, GPUTime = 0, CPUtimer = 0; dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0; for (int tile = 0; tile < Opt.nTile; ++tile){ dBinLoc = 0; if(tile > 0) { dLoc += TiledX[tile-1].totNnz; dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size(); dFbrLoc += TiledX[tile - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ; } BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); bool useLoop = false; int smallBinEndsAt = 5; int slcPerTb = 0; int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;// if(warpPerFbr > (BLOCKSIZE/32)){ cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" 
<< endl; exit(0); } int logOfWarpPerFbr = log2(warpPerFbr); int bin = 0; // /* Like PARTI loop */ = if(useLoop) grid.x = Opt.gridSize;// 32768; else grid.x = ( warpPerFbr * 32 * TiledX[tile].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; int dloc = 0; double t0 = seconds(); cuda_timer_start(start); if(useLoop) mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_loop<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers, dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else{ if (TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers, dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } cuda_timer_stop(start, stop, mili); CPUtimer += seconds() - t0; GPUTime += mili; if(Opt.verbose){ cout << "Tile: " << tile << " - time: " << mili << "ms"; cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: " << TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " "; cout << endl; } } allModeGPUTime += GPUTime; cout << "singleCSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl; } } cout << "Total GPU time: " << allModeGPUTime << ", nnz:" << TiledX[0].totNnz << ", nFibers:" << TiledX[0].fbrPtr[1].size() << ", nSlc:" << TiledX[0].fbrIdx[0].size() << endl; for (int bin = 0; bin < Opt.nBin; ++bin) cudaStreamDestroy(streams[bin]); /* Copying output matrix from GPU to CPU for correctness check */ int MTTKRPmode = TiledX[0].ndims - 1; ITYPE loc = ((TiledX[0].ndims == 3) ? 
szDU[0] + szDU[1] : szDU[0] + szDU[1] + szDU[2]); checkCuda(cudaMemcpy(&U[MTTKRPmode].vals[0], dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0); // check correctness // if(Opt.impType == 14){ // MTTKRPmode = 3; // checkCuda(cudaMemcpy(&U[MTTKRPmode].vals[0] , dU + szDU[0] +szDU[1] + szDU[2], U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0); // } // else // checkCuda(cudaMemcpy(&U[mode0].vals[0], dU, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0); cudaFree(dVals); cudaFree(dU); //cudaFree(dU1); cudaFree(dU2); cudaFree(dU3); cudaFree(dfbrIdx0); cudaFree(dInds2); cudaFree(dInds3); cudaFree(dfbrIdx0); cudaFree(dfbrIdx1); cudaFree(dFbrIdx2); cudaFree(dfbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrPtr2); cudaFree(dFbrLikeSlcInds); return 0; } int MTTKRP_MIHCSR_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt){ ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dFbrLikeSlcInds; DTYPE *dVals; ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0; ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float memcpyTime = 0; // All m same mode ITYPE mode0 = 0;//TiledX[0].modeOrder[0]; ITYPE mode1 = 1;;//TiledX[0].modeOrder[1]; ITYPE mode2 = 2;//TiledX[0].modeOrder[2]; ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ; for (int m = 0; m < TiledX[0].ndims; ++m){ if (TiledX[m].totNnz == 0) continue; totNnz += TiledX[m].totNnz; totSlcPtr += TiledX[m].fbrPtr[0].size() ; totSlcIdx += TiledX[m].fbrIdx[0].size() ; totFbrPtr += TiledX[m].fbrPtr[1].size() ; totFbrIdx += TiledX[m].fbrIdx[1].size() ; totFbrPtr2 += ((TiledX[m].ndims == 4) ? TiledX[m].fbrPtr[2].size() : 0) ; } //allocate and memcpy GPU memory //Tensor cuda_timer_start(start); checkCuda(cudaMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0); // checkCuda(cudaMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 3) checkCuda(cudaMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 4){ checkCuda(cudaMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0); } /* cuda memcopy for tiled parts*/ for (int m = 0; m < TiledX[0].ndims; ++m){ if(m > 0) { if (TiledX[m-1].totNnz > 0) { dLoc += TiledX[m-1].totNnz; dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); // all m same dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size(); dFbrLoc += TiledX[m - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[m].ndims == 4) ? 
TiledX[m - 1].fbrPtr[2].size() : 0) ; } } if (TiledX[m].totNnz == 0) continue; checkCuda(cudaMemcpy(dVals + dLoc, &(TiledX[m].vals[0]), TiledX[m].totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[m].fbrPtr[0][0]), TiledX[m].fbrPtr[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[m].fbrIdx[0][0]), TiledX[m].fbrIdx[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[m].fbrPtr[1][0]), TiledX[m].fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[m].fbrIdx[1][0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[m].fbrLikeSlcInds[0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); if(TiledX[m].ndims == 3){ if(m == 0) // checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[m].inds[mode2][0]), TiledX[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); else if(m == 1) checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); else if(m == 2) checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); } if(TiledX[m].ndims == 4){ checkCuda(cudaMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[m].fbrPtr[2][0]), TiledX[m].fbrPtr[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[m].fbrIdx[2][0]), TiledX[m].fbrIdx[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dInds3 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[3]][0]), TiledX[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); } } cuda_timer_stop(start, stop, memcpyTime); cout << "CPU to GPU Memcopy time: " << memcpyTime << endl; // //Matrices unsigned int *dULoc = new unsigned int[TiledX[0].ndims]; unsigned int *szDU = new unsigned int[TiledX[0].ndims]; // //Matrices DTYPE *dU;// *dU0, *dU1, *dU2, *dU3; ITYPE mtxSize = ((TiledX[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols : (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols ); checkCuda(cudaMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), 0); for (int m = 0; m < TiledX[0].ndims; ++m) szDU[m] = U[m].nRows * U[m].nCols; cudaMemset(dU+0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(cudaMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); if(TiledX[0].ndims == 4) checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); // BLOCK and GRID int BLOCKSIZE = 512; unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32); // if(Opt.warpPerSlice * 32 > BLOCKSIZE){ // cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." 
<< endl; // exit(0); // } cudaStream_t streams[Opt.nBin]; float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0; int smallBinEndsAt = 5; /* Warp per slice and threadblock per slice */ int *warpPerSlc = new int[Opt.nBin]; int *logOfWarpPerSlc = new int[Opt.nBin]; int *TbPerSlc = new int[Opt.nBin]; int *logOfTbPerSlc = new int[Opt.nBin]; for (int bin = 0; bin < Opt.nBin ; ++bin){ TbPerSlc[bin] = 1; warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1); if(warpPerSlc[bin] > 16) warpPerSlc[bin] = 16; logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]); TbPerSlc[bin] = 1; logOfTbPerSlc[bin] = 0; if (bin >= smallBinEndsAt){ TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // 1st big bin starts with 1 TB 1 << 1 not 1 << 5 if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32; logOfTbPerSlc[bin] = log2(TbPerSlc[bin]); warpPerSlc[bin] = 16; logOfWarpPerSlc[bin] = 4; } } // TBD: change warpPerSlc to warpPerSlc[bin] and all int slcPerTb = 1; dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0; for (int bin = 0; bin < Opt.nBin; ++bin) cudaStreamCreate(&streams[bin]); for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode){ if(MTTKRPmode > 0){ mili = 0; GPUTime = 0; CPUtimer = 0; dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0; // MTTKRP on mode mode 0 changed DU0. To pass correctness for now initializing to 2 again. int mode = MTTKRPmode - 1; for(long r = 0; r < U[mode].nRows; ++r){ for(long c = 0; c < U[mode].nCols; ++c) // or u[mode].nCols U[mode].vals[r * U[mode].nCols + c] = mode + .5;// 0.1 * drand48(); //1 ;//(r * R + c + 1); // } if(MTTKRPmode == 1){ checkCuda(cudaMemcpy(dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); cudaMemset(dU + szDU[0], 0, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)); } else if(MTTKRPmode == 2){ checkCuda(cudaMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); cudaMemset(dU + szDU[0] + szDU[1], 0, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)); } else if(MTTKRPmode == 3){ checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1] , &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); cudaMemset(dU + szDU[0] + szDU[1] + szDU[2], 0, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)); } } for (int m = 0; m < TiledX[0].ndims; ++m){ /* matrix order according to mode order*/ for (int mm = 0; mm < TiledX[0].ndims; ++mm){ int curMode = TiledX[m].modeOrder[mm]; dULoc[mm] = 0; for (int q = 0; q < curMode; ++q) dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0 } dBinLoc = 0; if(m > 0) { if (TiledX[m-1].totNnz > 0) { dLoc += TiledX[m-1].totNnz; dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size(); dFbrLoc += TiledX[m - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? 
TiledX[m - 1].fbrPtr[2].size(): 0) ; } } BLOCKSIZE = 512; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); if (TiledX[m].totNnz == 0) continue; cuda_timer_start(start); if(TiledX[m].modeOrder[0] == MTTKRPmode && TiledX[m].totNnz){ if(Opt.verbose) cout << "Slc atomics - " ; // BLOCKSIZE = 128; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//4;//; int logOfWarpPerFbr = log2(warpPerFbr); int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB int logOfFbrPerWarp = log2(fbrPerWarp ); if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){ cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!" << endl << "hint: increase -b!" << endl; exit(0); } grid.x = ( warpPerFbr * 32 * ((TiledX[m].nFibers + fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); else if(TiledX[0].ndims == 4) mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); } else if(TiledX[0].ndims == 4 && TiledX[m].modeOrder[1] == MTTKRPmode && TiledX[m].totNnz){ if(Opt.verbose) cout << "FbrS atomics - "; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//1;//BLOCKSIZE/32;//1;////4;//; if(warpPerFbr > (BLOCKSIZE/32)){ cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl; exit(0); } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } else if(TiledX[m].modeOrder[TiledX[0].ndims-2] == MTTKRPmode && TiledX[m].totNnz){ if(Opt.verbose) cout << "Fbr atomics - "; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;// if(warpPerFbr > (BLOCKSIZE/32)){ cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" 
<< endl; exit(0); } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } else if(TiledX[m].modeOrder[TiledX[0].ndims-1] == MTTKRPmode && TiledX[m].totNnz){ if(Opt.verbose) cout << "nnz atomics - " ; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;// if(warpPerFbr > (BLOCKSIZE/32)){ cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl; exit(0); } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; if (TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } cuda_timer_stop(start, stop, mili); GPUTime += mili; if(Opt.verbose) { cout << "Tile: " << m << " - time: " << mili << " ms"; cout <<" nnz: " << TiledX[m].totNnz << " nFibers: " << TiledX[m].fbrPtr[1].size() << " nSlc " << TiledX[m].fbrIdx[0].size() << " "; cout << " modeOrder: " << TiledX[m].modeOrder[0] <<" " << TiledX[m].modeOrder[1] <<" " << TiledX[m].modeOrder[2]; cout << endl; } } if(Opt.verbose) cout << "MI-HCSR-GPU-mode "<< MTTKRPmode <<" : " << GPUTime << "," << endl; allModeGPUTime += GPUTime; } int totalMIslics = 0, totalMISfibers = 0, totalMIfibers = 0, totalMInnz = 0;; for (int m = 0; m < TiledX[0].ndims; ++m){ if(TiledX[m].totNnz){ if(TiledX[m].ndims == 3){ totalMIslics += TiledX[m].fbrIdx[0].size(); totalMIfibers += TiledX[m].fbrPtr[1].size(); totalMInnz += TiledX[m].totNnz; } if(TiledX[m].ndims == 4){ totalMIslics += TiledX[m].fbrIdx[0].size(); totalMISfibers += TiledX[m].fbrPtr[1].size(); totalMIfibers += TiledX[m].fbrPtr[2].size(); totalMInnz += TiledX[m].totNnz; } } } cout << "Total GPU time: " << allModeGPUTime; // if(Opt.verbose) if(TiledX[0].ndims == 3) cout << " nSlc:" << totalMIslics << ", nFibers:" << totalMIfibers << ", nnz:" << totalMInnz << endl; else if(TiledX[0].ndims == 4) cout << " nSlc:" << totalMIslics << ", nSFibers:" << totalMISfibers << ", nFibers:" << totalMIfibers << ", nnz:" << totalMInnz << endl; for (int bin = 0; bin < Opt.nBin; ++bin) cudaStreamDestroy(streams[bin]); /* Copying output matrix from GPU to CPU for 
correctness check */ int MTTKRPmode = TiledX[0].ndims - 1; ITYPE loc = ((TiledX[0].ndims == 3) ? szDU[0] + szDU[1] : szDU[0] + szDU[1] + szDU[2]); checkCuda(cudaMemcpy(&U[MTTKRPmode].vals[0], dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0); cudaFree(dVals); cudaFree(dU); //cudaFree(dU1); cudaFree(dU2); cudaFree(dU3); cudaFree(dfbrIdx0); cudaFree(dInds2); cudaFree(dInds3); cudaFree(dfbrIdx0); cudaFree(dfbrIdx1); cudaFree(dFbrIdx2); cudaFree(dfbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrPtr2); cudaFree(dFbrLikeSlcInds); return 0; } int init_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt, ITYPE **dInds2, ITYPE **dfbrPtr1, ITYPE **dfbrIdx1, ITYPE **dFbrLikeSlcInds, DTYPE **dVals, DTYPE **dU){ ITYPE mode0 = 0;//TiledX[0].modeOrder[0]; ITYPE mode1 = 1;;//TiledX[0].modeOrder[1]; ITYPE mode2 = 2;//TiledX[0].modeOrder[2]; ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ; // if(iter == 0 && cpdMode == 0) ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0; ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0; for (int m = 0; m < TiledX[0].ndims; ++m){ if (TiledX[m].totNnz == 0) continue; totNnz += TiledX[m].totNnz; totFbrPtr += TiledX[m].fbrPtr[1].size() ; totFbrIdx += TiledX[m].fbrIdx[1].size() ; totFbrPtr2 += ((TiledX[m].ndims == 4) ? TiledX[m].fbrPtr[2].size() : 0) ; } /*allocate and memcpy GPU memory*/ checkCuda(cudaMalloc((void**) dVals, totNnz * sizeof(DTYPE)), 0); checkCuda(cudaMalloc((void**) dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0); checkCuda(cudaMalloc((void**) dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0); if(TiledX[0].ndims == 3) checkCuda(cudaMalloc((void**) dInds2, totNnz * sizeof(ITYPE)), 0); for (int m = 0; m < TiledX[0].ndims; ++m){ if(m > 0) { if (TiledX[m-1].totNnz > 0) { dLoc += TiledX[m-1].totNnz; dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); // all m same dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size(); dFbrLoc += TiledX[m - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size(); } } if (TiledX[m].totNnz == 0) continue; checkCuda(cudaMemcpy(*dVals + dLoc, &(TiledX[m].vals[0]), TiledX[m].totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(*dfbrPtr1 + dFbrLoc, &(TiledX[m].fbrPtr[1][0]), TiledX[m].fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(*dfbrIdx1 + dFbrIdxLoc, &(TiledX[m].fbrIdx[1][0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(*dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[m].fbrLikeSlcInds[0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(*dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0); } // //Matrices unsigned int *szDU = new unsigned int[TiledX[0].ndims]; ITYPE mtxSize = ((TiledX[0].ndims == 3) ? 
(U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols : (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols ); for (int m = 0; m < TiledX[0].ndims; ++m) szDU[m] = U[m].nRows * U[m].nCols; checkCuda(cudaMalloc((void**) dU, mtxSize * sizeof(DTYPE)), 0); // cudaMemset(dU+0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(cudaMemcpy(*dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(*dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); checkCuda(cudaMemcpy(*dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0); // MTTKRP_MIHCSR_GPU_oneMode_forCPD(TiledX, U, Opt, 0, 0, // dInds2, dfbrPtr1, dfbrIdx1, dFbrLikeSlcInds, dVals, dU); } int MTTKRP_MIHCSR_GPU_oneMode_forCPD(TiledTensor *TiledX, Matrix *U, const Options &Opt, int cpdMode, int iter, ITYPE *dInds2, ITYPE *dfbrPtr1, ITYPE *dfbrIdx1, ITYPE *dFbrLikeSlcInds, DTYPE *dVals, DTYPE *dU){ cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float mili; ITYPE *dInds3, *dfbrPtr0, *dfbrIdx0, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin; // DTYPE *dVals; ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0; unsigned int *dULoc = new unsigned int[TiledX[0].ndims]; unsigned int *szDU = new unsigned int[TiledX[0].ndims]; for (int m = 0; m < TiledX[0].ndims; ++m) szDU[m] = U[m].nRows * U[m].nCols; ITYPE loc = 0; for (int m = 0; m < cpdMode; ++m) loc += szDU[m]; cudaMemset(dU+loc, 0, U[cpdMode].nRows * U[cpdMode].nCols * sizeof(DTYPE)); // BLOCK and GRID int BLOCKSIZE = 512; float GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0; dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0; int MTTKRPmode = cpdMode; // for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode) { for (int m = 0; m < TiledX[0].ndims; ++m){ /* matrix order according to mode order*/ for (int mm = 0; mm < TiledX[0].ndims; ++mm){ int curMode = TiledX[m].modeOrder[mm]; dULoc[mm] = 0; for (int q = 0; q < curMode; ++q) dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0 } dBinLoc = 0; if(m > 0) { if (TiledX[m-1].totNnz > 0) { dLoc += TiledX[m-1].totNnz; dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size(); dFbrLoc += TiledX[m - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[m - 1].fbrPtr[2].size(): 0) ; } } BLOCKSIZE = 512; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); if (TiledX[m].totNnz == 0) continue; cuda_timer_start(start); if(TiledX[m].modeOrder[0] == MTTKRPmode && TiledX[m].totNnz){ // if(Opt.verbose) // cout << "Slc atomics - " ; // BLOCKSIZE = 128; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//4;//; int logOfWarpPerFbr = log2(warpPerFbr); int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB int logOfFbrPerWarp = log2(fbrPerWarp ); if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){ cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!" << endl << "hint: increase -b!" 
<< endl; exit(0); } grid.x = ( warpPerFbr * 32 * ((TiledX[m].nFibers + fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); else if(TiledX[0].ndims == 4) mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); } else if(TiledX[m].modeOrder[TiledX[0].ndims-2] == MTTKRPmode && TiledX[m].totNnz){ // if(Opt.verbose) // cout << "Fbr atomics - "; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;// if(warpPerFbr > (BLOCKSIZE/32)){ cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl; exit(0); } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } else if(TiledX[m].modeOrder[TiledX[0].ndims-1] == MTTKRPmode && TiledX[m].totNnz){ // if(Opt.verbose) // cout << "nnz atomics - " ; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;// if(warpPerFbr > (BLOCKSIZE/32)){ cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" 
<< endl; exit(0); } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; if (TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } cuda_timer_stop(start, stop, mili); GPUTime += mili; if(Opt.verbose) { cout << "Tile: " << m << " - time: " << mili << " ms"; cout <<" nnz: " << TiledX[m].totNnz << " nFibers: " << TiledX[m].fbrPtr[1].size() << " nSlc " << TiledX[m].fbrIdx[0].size() << " "; cout << " modeOrder: " << TiledX[m].modeOrder[0] <<" " << TiledX[m].modeOrder[1] <<" " << TiledX[m].modeOrder[2]; cout << endl; } } // cout << "MI-HCSR-GPU-mode "<< MTTKRPmode <<" : " << GPUTime << "," << endl; allModeGPUTime += GPUTime; } // ITYPE loc = 0; // for (int m = 0; m < cpdMode; ++m) // loc += szDU[m]; // ITYPE loc = szDU[0]; /* Copying output matrix from GPU to CPU for correctness check */ checkCuda(cudaMemcpy(&U[cpdMode].vals[0], dU + loc, U[cpdMode].nRows * U[cpdMode].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0); if(iter == Opt.cpdIters - 1 && cpdMode == TiledX[0].ndims - 1) { cout << "Freeing variable " << endl; cudaFree(dVals); cudaFree(dU); //cudaFree(dU1); cudaFree(dU2); cudaFree(dU3); cudaFree(dfbrIdx0); cudaFree(dInds2); cudaFree(dInds3); cudaFree(dfbrIdx0); cudaFree(dfbrIdx1); cudaFree(dFbrIdx2); cudaFree(dfbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrPtr2); cudaFree(dFbrLikeSlcInds); } return 0; }
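Every kernel launch in the MTTKRP functions above derives its grid the same way: each fiber gets warpPerFbr warps (optionally packing fbrPerWarp fibers into one warp at the slice level, and fbrPerWarp = 1 for the fiber- and nnz-level kernels), and the grid size is the ceiling of the total thread count over the block size, guarded so that neither -w nor -s exceeds the number of warps in a thread block. Below is a minimal host-side sketch of that computation, factored into a helper; the function and struct names (fiberLaunchCfg, LaunchCfg) are our own and do not appear in the original source.

#include <cstdio>
#include <cstdlib>

// Hypothetical helper mirroring the grid-sizing logic repeated before each kernel launch.
// warpPerFbr : warps cooperating on one fiber (-w)
// fbrPerWarp : fibers packed into one warp for the slice-level kernel (-s), 1 otherwise
// blockSize  : threads per block (-b)
struct LaunchCfg { unsigned grid; unsigned block; };

static LaunchCfg fiberLaunchCfg(long nFibers, int warpPerFbr, int fbrPerWarp, int blockSize)
{
    // Neither -w nor -s may exceed the number of warps in one thread block.
    if (warpPerFbr > blockSize / 32 || fbrPerWarp > blockSize / 32) {
        fprintf(stderr, "warpPerFbr (-w) / fbrPerWarp (-s) cannot exceed blockSize/32; increase -b\n");
        exit(EXIT_FAILURE);
    }
    // Fibers are grouped fbrPerWarp at a time (ceiling division), each group gets
    // warpPerFbr warps of 32 threads, and the thread total is rounded up to whole blocks.
    long fiberGroups = (nFibers + fbrPerWarp - 1) / fbrPerWarp;
    long threads     = 32L * warpPerFbr * fiberGroups;
    LaunchCfg cfg;
    cfg.block = (unsigned)blockSize;
    cfg.grid  = (unsigned)((threads + blockSize - 1) / blockSize);
    return cfg;
}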
53271796514c960b1b16f47a92bf8fe6e65f5e58.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

// Function executed on the graphics card (kernel)
__global__ void add(int *a, int *b, int *c) {
    *c = *a + *b;
}

int main(void) {
    int a, b, c;            // Variables stored in host memory
    int *d_a, *d_b, *d_c;   // Pointers to memory on the CUDA device
    int size = sizeof(int);

    // Memory allocation on the device
    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    hipMalloc((void **)&d_c, size);

    // Example values
    a = 2;
    b = 7;

    // Copy the values to the device
    hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);

    // Launch the kernel on the device - 1 block - 1 thread
    hipLaunchKernelGGL(add, dim3(1), dim3(1), 0, 0, d_a, d_b, d_c);

    // Copy the result back to the host
    hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);

    // Print and clean up
    printf("%d+%d=%d\n", a, b, c);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    return 0;
}
53271796514c960b1b16f47a92bf8fe6e65f5e58.cu
#include <stdio.h>

// Function executed on the graphics card (kernel)
__global__ void add(int *a, int *b, int *c) {
    *c = *a + *b;
}

int main(void) {
    int a, b, c;            // Variables stored in host memory
    int *d_a, *d_b, *d_c;   // Pointers to memory on the CUDA device
    int size = sizeof(int);

    // Memory allocation on the CUDA device
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // Example values
    a = 2;
    b = 7;

    // Copy the values to the device
    cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);

    // Launch the kernel on the CUDA device - 1 block - 1 thread
    add<<<1,1>>>(d_a, d_b, d_c);

    // Copy the result back to the host
    cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);

    // Print and clean up
    printf("%d+%d=%d\n", a, b, c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
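This pair shows how hipify rewrites a triple-chevron launch into hipLaunchKernelGGL while leaving the rest of the runtime calls name-for-name. Neither version checks the return codes of the allocation, copy, or launch calls. A minimal error-checking sketch using only the standard runtime error API is given below; the macro name CUDA_CHECK is our own choice, not part of either file, and the HIP side is symmetric (hipError_t, hipSuccess, hipGetErrorString).

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Minimal error-checking wrapper (hypothetical name).
#define CUDA_CHECK(call)                                              \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error %s at %s:%d\n",               \
                    cudaGetErrorString(err_), __FILE__, __LINE__);    \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

// Usage, e.g.:
//   CUDA_CHECK(cudaMalloc((void **)&d_a, size));
//   CUDA_CHECK(cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice));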
a47bc8322cc9d6de29ead14c63ce153717700f51.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "parallel.cuh" #include "build.h" namespace hagrid { __global__ void count_sentinel_refs(const Cell* cells, int* ref_counts, int num_cells) { int id = threadIdx.x + blockDim.x * blockIdx.x; if (id >= num_cells) return; auto cell = load_cell(cells + id); auto count = cell.end - cell.begin; ref_counts[id] = count > 0 ? count + 1 : 0; } __global__ void emit_small_cells(const Cell* cells, SmallCell* small_cells, int* __restrict__ refs, int* __restrict__ ref_scan, int* __restrict__ sentinel_refs, int num_cells) { int id = threadIdx.x + blockDim.x * blockIdx.x; if (id >= num_cells) return; auto cell = load_cell(cells + id); int first = ref_scan[id]; int count = cell.end - cell.begin; SmallCell small_cell(usvec3(cell.min), usvec3(cell.max), count > 0 ? first : -1); store_cell(small_cells + id, small_cell); if (count > 0) { for (int i = 0; i < count; i++) sentinel_refs[first + i] = refs[cell.begin + i]; sentinel_refs[first + count] = -1; } } bool compress_grid(MemManager& mem, Grid& grid) { auto dims = grid.dims << grid.shift; // Compression cannot work if the dimensions cannot fit into 16-bit indices if (dims.x >= (1 << 16) || dims.y >= (1 << 16) || dims.z >= (1 << 16)) return false; Parallel par(mem); auto ref_counts = mem.alloc<int>(grid.num_cells + 1); auto ref_scan = mem.alloc<int>(grid.num_cells + 1); auto small_cells = mem.alloc<SmallCell>(grid.num_cells); hipLaunchKernelGGL(( count_sentinel_refs), dim3(round_div(grid.num_cells, 64)), dim3(64), 0, 0, grid.cells, ref_counts, grid.num_cells); auto num_sentinel_refs = par.scan(ref_counts, grid.num_cells + 1, ref_scan); auto sentinel_refs = mem.alloc<int>(num_sentinel_refs); hipLaunchKernelGGL(( emit_small_cells), dim3(round_div(grid.num_cells, 64)), dim3(64), 0, 0, grid.cells, small_cells, grid.ref_ids, ref_scan, sentinel_refs, grid.num_cells); grid.small_cells = small_cells; mem.free(grid.cells); mem.free(grid.ref_ids); mem.free(ref_counts); mem.free(ref_scan); grid.cells = nullptr; grid.ref_ids = sentinel_refs; grid.num_refs = num_sentinel_refs; return true; } } // namespace hagrid
a47bc8322cc9d6de29ead14c63ce153717700f51.cu
#include "parallel.cuh" #include "build.h" namespace hagrid { __global__ void count_sentinel_refs(const Cell* cells, int* ref_counts, int num_cells) { int id = threadIdx.x + blockDim.x * blockIdx.x; if (id >= num_cells) return; auto cell = load_cell(cells + id); auto count = cell.end - cell.begin; ref_counts[id] = count > 0 ? count + 1 : 0; } __global__ void emit_small_cells(const Cell* cells, SmallCell* small_cells, int* __restrict__ refs, int* __restrict__ ref_scan, int* __restrict__ sentinel_refs, int num_cells) { int id = threadIdx.x + blockDim.x * blockIdx.x; if (id >= num_cells) return; auto cell = load_cell(cells + id); int first = ref_scan[id]; int count = cell.end - cell.begin; SmallCell small_cell(usvec3(cell.min), usvec3(cell.max), count > 0 ? first : -1); store_cell(small_cells + id, small_cell); if (count > 0) { for (int i = 0; i < count; i++) sentinel_refs[first + i] = refs[cell.begin + i]; sentinel_refs[first + count] = -1; } } bool compress_grid(MemManager& mem, Grid& grid) { auto dims = grid.dims << grid.shift; // Compression cannot work if the dimensions cannot fit into 16-bit indices if (dims.x >= (1 << 16) || dims.y >= (1 << 16) || dims.z >= (1 << 16)) return false; Parallel par(mem); auto ref_counts = mem.alloc<int>(grid.num_cells + 1); auto ref_scan = mem.alloc<int>(grid.num_cells + 1); auto small_cells = mem.alloc<SmallCell>(grid.num_cells); count_sentinel_refs<<<round_div(grid.num_cells, 64), 64>>>(grid.cells, ref_counts, grid.num_cells); auto num_sentinel_refs = par.scan(ref_counts, grid.num_cells + 1, ref_scan); auto sentinel_refs = mem.alloc<int>(num_sentinel_refs); emit_small_cells<<<round_div(grid.num_cells, 64), 64>>>(grid.cells, small_cells, grid.ref_ids, ref_scan, sentinel_refs, grid.num_cells); grid.small_cells = small_cells; mem.free(grid.cells); mem.free(grid.ref_ids); mem.free(ref_counts); mem.free(ref_scan); grid.cells = nullptr; grid.ref_ids = sentinel_refs; grid.num_refs = num_sentinel_refs; return true; } } // namespace hagrid
8ebe9acf893a5e3d1770b68a8d4778de76066502.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "definitions.h" #include "kernel.h" #include <stdio.h> #include <stdlib.h> #include <string.h> __global__ void kernel_Localization(float *ParamIn, float *ParamNext, float *Convergence, float *FirstDev, float *SecondDev, int Nfit, int N_int, int FitBoxsize, float lambda, float SampleSpacingXY) { const int tx = threadIdx.x; const int bx = blockIdx.x; const int BlockSize = blockDim.x; //Prevent read/write past end of array int j = BlockSize*bx + tx; if ((bx*BlockSize + tx) >= Nfit) return; float stepLimit[NP] = {0.03f, 0.03f, 0.06f, 400, 2}; // x,y,z step limits are in micron float x0_next[NPL]; float dL_pos = 0, dL2_pos = 0; float dL_I, dL2_I; // photon and background float step[NPL]; float rate = 1/(1 + lambda); float tmp; int s, p, k; // x,y,z for (p = 0; p < 3; p++) { for (s = 0; s < NCH; s++) //Biplane; two channel; edited by FX { dL_pos += FirstDev[s*NP*Nfit + j*NP + p]; dL2_pos += SecondDev[s*NP*Nfit + j*NP + p]; } tmp = -1 * dL_pos / dL2_pos * rate; step[p] = fminf(fmaxf(tmp, -stepLimit[p]), stepLimit[p]); } for (s = 0; s < NCH; s++) { // photon dL_I = FirstDev[s*NP*Nfit + j*NP + 3]; dL2_I = SecondDev[s*NP*Nfit + j*NP + 3]; tmp = -1 * dL_I / dL2_I * rate; step[3 + s] = fminf(fmaxf(tmp, -stepLimit[3]), stepLimit[3]); // background dL_I = FirstDev[s*NP*Nfit + j*NP + 4]; dL2_I = SecondDev[s*NP*Nfit + j*NP + 4]; tmp = -1 * dL_I / dL2_I * rate; step[3 + NCH + s] = fminf(fmaxf(tmp, -stepLimit[4]), stepLimit[4]); //??? step need change, edited by FX } x0_next[0] = ParamIn[NPL*j + 0] + step[0] * (-1 / SampleSpacingXY / N_int); x0_next[1] = ParamIn[NPL*j + 1] + step[1] * (-1 / SampleSpacingXY / N_int); for (k = 2; k < NPL; k++) { x0_next[k] = ParamIn[NPL*j + k] + step[k]; } for (s = 0; s < NCH; s++) { x0_next[3 + s] = (x0_next[3 + s] <= 100 ? 100 : x0_next[3 + s]); // intensity is not less than 100 x0_next[3 + NCH + s] = (x0_next[3 + NCH + s] <= 0 ? 
0.01f : x0_next[3 + NCH + s]);// bg is not less than 0, edited by FX } x0_next[0] = fminf(fmaxf(x0_next[0], 4), FitBoxsize - 4);// xy shift is within fitting box x0_next[1] = fminf(fmaxf(x0_next[1], 4), FitBoxsize - 4); x0_next[2] = fminf(fmaxf(x0_next[2], -1.2), 1.2);//z position is within -1.4 to 1.4 um for (k = 0; k < NPL; k++) { ParamNext[NPL*j + k] = x0_next[k]; Convergence[NPL*j + k] = x0_next[k] - ParamIn[NPL*j + k]; } } __global__ void kernel_getdev(float *data, float *gainR, float *PSF, float *dPSFx, float *dPSFy, float *dPSFz, float *I, float *bg, int Nfit, int PSFsize, float *FirstDev, float *SecondDev) { const int tx = threadIdx.x; const int bx = blockIdx.x; const int BlockSize = blockDim.x; //Prevent read/write past end of array int j = BlockSize*bx + tx; if ((bx*BlockSize + tx) >= Nfit) return; float dL[NP], dL2[NP]; float psfI; int k, i; for (k = 0; k < NP; k++) { dL[k] = 0; dL2[k] = 0; } fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFx[j*PSFsize], I[j], I[j], bg[j], &dL[0], &dL2[0], PSFsize); fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFy[j*PSFsize], I[j], I[j], bg[j], &dL[1], &dL2[1], PSFsize); fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFz[j*PSFsize], I[j], I[j], bg[j], &dL[2], &dL2[2], PSFsize); fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &PSF[j*PSFsize], I[j], 1.0, bg[j], &dL[3], &dL2[3], PSFsize); for (int i = 0; i < PSFsize; i++) { psfI = PSF[j*PSFsize + i] * I[j] + bg[j] + gainR[j*PSFsize + i]; dL[4] += (data[j*PSFsize + i] + gainR[j*PSFsize + i]) / psfI - 1; dL2[4] += -1 * (data[j*PSFsize + i] + gainR[j*PSFsize + i]) / psfI / psfI; } for (int k = 0; k < NP; k++) { FirstDev[NP * j + k] = dL[k]; SecondDev[NP * j + k] = dL2[k]; } } __device__ void fundev(float *data, float *gainR, float *psf, float *dpsf, float I, float Id, float bg, float *dL, float *dL2, int PSFsize) { float psfI; for (int i = 0; i < PSFsize; i++) { psfI = psf[i] * I + bg + gainR[i]; dL[0] += ((data[i] + gainR[i]) / psfI - 1) * dpsf[i] * Id; dL2[0] += -1 * Id * Id * dpsf[i] * dpsf[i] * (data[i] + gainR[i]) / psfI / psfI; } }
8ebe9acf893a5e3d1770b68a8d4778de76066502.cu
#include "cuda_runtime.h" #include "definitions.h" #include "kernel.h" #include <stdio.h> #include <stdlib.h> #include <string.h> __global__ void kernel_Localization(float *ParamIn, float *ParamNext, float *Convergence, float *FirstDev, float *SecondDev, int Nfit, int N_int, int FitBoxsize, float lambda, float SampleSpacingXY) { const int tx = threadIdx.x; const int bx = blockIdx.x; const int BlockSize = blockDim.x; //Prevent read/write past end of array int j = BlockSize*bx + tx; if ((bx*BlockSize + tx) >= Nfit) return; float stepLimit[NP] = {0.03f, 0.03f, 0.06f, 400, 2}; // x,y,z step limits are in micron float x0_next[NPL]; float dL_pos = 0, dL2_pos = 0; float dL_I, dL2_I; // photon and background float step[NPL]; float rate = 1/(1 + lambda); float tmp; int s, p, k; // x,y,z for (p = 0; p < 3; p++) { for (s = 0; s < NCH; s++) //Biplane; two channel; edited by FX { dL_pos += FirstDev[s*NP*Nfit + j*NP + p]; dL2_pos += SecondDev[s*NP*Nfit + j*NP + p]; } tmp = -1 * dL_pos / dL2_pos * rate; step[p] = fminf(fmaxf(tmp, -stepLimit[p]), stepLimit[p]); } for (s = 0; s < NCH; s++) { // photon dL_I = FirstDev[s*NP*Nfit + j*NP + 3]; dL2_I = SecondDev[s*NP*Nfit + j*NP + 3]; tmp = -1 * dL_I / dL2_I * rate; step[3 + s] = fminf(fmaxf(tmp, -stepLimit[3]), stepLimit[3]); // background dL_I = FirstDev[s*NP*Nfit + j*NP + 4]; dL2_I = SecondDev[s*NP*Nfit + j*NP + 4]; tmp = -1 * dL_I / dL2_I * rate; step[3 + NCH + s] = fminf(fmaxf(tmp, -stepLimit[4]), stepLimit[4]); //??? step need change, edited by FX } x0_next[0] = ParamIn[NPL*j + 0] + step[0] * (-1 / SampleSpacingXY / N_int); x0_next[1] = ParamIn[NPL*j + 1] + step[1] * (-1 / SampleSpacingXY / N_int); for (k = 2; k < NPL; k++) { x0_next[k] = ParamIn[NPL*j + k] + step[k]; } for (s = 0; s < NCH; s++) { x0_next[3 + s] = (x0_next[3 + s] <= 100 ? 100 : x0_next[3 + s]); // intensity is not less than 100 x0_next[3 + NCH + s] = (x0_next[3 + NCH + s] <= 0 ? 
0.01f : x0_next[3 + NCH + s]);// bg is not less than 0, edited by FX } x0_next[0] = fminf(fmaxf(x0_next[0], 4), FitBoxsize - 4);// xy shift is within fitting box x0_next[1] = fminf(fmaxf(x0_next[1], 4), FitBoxsize - 4); x0_next[2] = fminf(fmaxf(x0_next[2], -1.2), 1.2);//z position is within -1.4 to 1.4 um for (k = 0; k < NPL; k++) { ParamNext[NPL*j + k] = x0_next[k]; Convergence[NPL*j + k] = x0_next[k] - ParamIn[NPL*j + k]; } } __global__ void kernel_getdev(float *data, float *gainR, float *PSF, float *dPSFx, float *dPSFy, float *dPSFz, float *I, float *bg, int Nfit, int PSFsize, float *FirstDev, float *SecondDev) { const int tx = threadIdx.x; const int bx = blockIdx.x; const int BlockSize = blockDim.x; //Prevent read/write past end of array int j = BlockSize*bx + tx; if ((bx*BlockSize + tx) >= Nfit) return; float dL[NP], dL2[NP]; float psfI; int k, i; for (k = 0; k < NP; k++) { dL[k] = 0; dL2[k] = 0; } fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFx[j*PSFsize], I[j], I[j], bg[j], &dL[0], &dL2[0], PSFsize); fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFy[j*PSFsize], I[j], I[j], bg[j], &dL[1], &dL2[1], PSFsize); fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFz[j*PSFsize], I[j], I[j], bg[j], &dL[2], &dL2[2], PSFsize); fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &PSF[j*PSFsize], I[j], 1.0, bg[j], &dL[3], &dL2[3], PSFsize); for (int i = 0; i < PSFsize; i++) { psfI = PSF[j*PSFsize + i] * I[j] + bg[j] + gainR[j*PSFsize + i]; dL[4] += (data[j*PSFsize + i] + gainR[j*PSFsize + i]) / psfI - 1; dL2[4] += -1 * (data[j*PSFsize + i] + gainR[j*PSFsize + i]) / psfI / psfI; } for (int k = 0; k < NP; k++) { FirstDev[NP * j + k] = dL[k]; SecondDev[NP * j + k] = dL2[k]; } } __device__ void fundev(float *data, float *gainR, float *psf, float *dpsf, float I, float Id, float bg, float *dL, float *dL2, int PSFsize) { float psfI; for (int i = 0; i < PSFsize; i++) { psfI = psf[i] * I + bg + gainR[i]; dL[0] += ((data[i] + gainR[i]) / psfI - 1) * dpsf[i] * Id; dL2[0] += -1 * Id * Id * dpsf[i] * dpsf[i] * (data[i] + gainR[i]) / psfI / psfI; } }
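kernel_Localization applies a damped, clamped Newton update per parameter: the raw step is the negative ratio of the first to the second derivative of the likelihood, scaled by the damping factor 1/(1 + lambda), and then clamped to a per-parameter step limit before being added to the current estimate. The scalar sketch below restates that update for one parameter; the function name dampedNewtonStep is ours, not part of the original file.

#include <math.h>

// Damped, clamped Newton step for one parameter, as in kernel_Localization:
//   step = clamp( -(dL/dtheta) / (d2L/dtheta2) * 1/(1 + lambda), -limit, +limit )
static float dampedNewtonStep(float dL, float d2L, float lambda, float limit)
{
    float rate = 1.0f / (1.0f + lambda);   // lambda = 0 gives a full Newton step
    float raw  = -dL / d2L * rate;
    return fminf(fmaxf(raw, -limit), limit);
}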
2f9e0ff8b5f2e265c1a986cfec2d4f11cc5e0fb0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <list> #include <unordered_map> #include <vector> #include <iostream> #include <ctime> #include <limits.h> #include <float.h> #include "../dogqc/include/csv.h" #include "../dogqc/include/util.h" #include "../dogqc/include/mappedmalloc.h" #include "../dogqc/include/util.cuh" #include "../dogqc/include/hashing.cuh" struct jpayl6 { int att2_nnationk; str_t att3_nname; }; struct jpayl5 { int att6_oorderke; int att7_ocustkey; }; struct jpayl9 { str_t att3_nname; int att6_oorderke; int att15_ccustkey; str_t att16_cname; str_t att17_caddress; str_t att19_cphone; float att20_cacctbal; str_t att22_ccomment; }; struct apayl11 { int att15_ccustkey; str_t att16_cname; float att20_cacctbal; str_t att19_cphone; str_t att3_nname; str_t att17_caddress; str_t att22_ccomment; }; __global__ void krnl_nation1( int* iatt2_nnationk, size_t* iatt3_nname_offset, char* iatt3_nname_char, unique_ht<jpayl6>* jht6) { int att2_nnationk; str_t att3_nname; int tid_nation1 = 0; unsigned loopVar = ((blockIdx.x * blockDim.x) + threadIdx.x); unsigned step = (blockDim.x * gridDim.x); unsigned flushPipeline = 0; int active = 0; while(!(flushPipeline)) { tid_nation1 = loopVar; active = (loopVar < 25); // flush pipeline if no new elements flushPipeline = !(__ballot_sync(ALL_LANES,active)); if(active) { att2_nnationk = iatt2_nnationk[tid_nation1]; att3_nname = stringScan ( iatt3_nname_offset, iatt3_nname_char, tid_nation1); } // -------- hash join build (opId: 6) -------- if(active) { jpayl6 payl6; payl6.att2_nnationk = att2_nnationk; payl6.att3_nname = att3_nname; uint64_t hash6; hash6 = 0; if(active) { hash6 = hash ( (hash6 + ((uint64_t)att2_nnationk))); } hashBuildUnique ( jht6, 50, hash6, &(payl6)); } loopVar += step; } } __global__ void krnl_orders2( int* iatt6_oorderke, int* iatt7_ocustkey, unsigned* iatt10_oorderda, multi_ht* jht5, jpayl5* jht5_payload) { int att6_oorderke; int att7_ocustkey; unsigned att10_oorderda; int tid_orders1 = 0; unsigned loopVar = ((blockIdx.x * blockDim.x) + threadIdx.x); unsigned step = (blockDim.x * gridDim.x); unsigned flushPipeline = 0; int active = 0; while(!(flushPipeline)) { tid_orders1 = loopVar; active = (loopVar < 1500000); // flush pipeline if no new elements flushPipeline = !(__ballot_sync(ALL_LANES,active)); if(active) { att6_oorderke = iatt6_oorderke[tid_orders1]; att7_ocustkey = iatt7_ocustkey[tid_orders1]; att10_oorderda = iatt10_oorderda[tid_orders1]; } // -------- selection (opId: 3) -------- if(active) { active = ((att10_oorderda >= 19931001) && (att10_oorderda < 19940101)); } // -------- hash join build (opId: 5) -------- if(active) { uint64_t hash5 = 0; if(active) { hash5 = 0; if(active) { hash5 = hash ( (hash5 + ((uint64_t)att7_ocustkey))); } } hashCountMulti ( jht5, 150000, hash5); } loopVar += step; } } __global__ void krnl_orders2_ins( int* iatt6_oorderke, int* iatt7_ocustkey, unsigned* iatt10_oorderda, multi_ht* jht5, jpayl5* jht5_payload, int* offs5) { int att6_oorderke; int att7_ocustkey; unsigned att10_oorderda; int tid_orders1 = 0; unsigned loopVar = ((blockIdx.x * blockDim.x) + threadIdx.x); unsigned step = (blockDim.x * gridDim.x); unsigned flushPipeline = 0; int active = 0; while(!(flushPipeline)) { tid_orders1 = loopVar; active = (loopVar < 1500000); // flush pipeline if no new elements flushPipeline = !(__ballot_sync(ALL_LANES,active)); if(active) { att6_oorderke = iatt6_oorderke[tid_orders1]; att7_ocustkey = iatt7_ocustkey[tid_orders1]; att10_oorderda = 
iatt10_oorderda[tid_orders1]; } // -------- selection (opId: 3) -------- if(active) { active = ((att10_oorderda >= 19931001) && (att10_oorderda < 19940101)); } // -------- hash join build (opId: 5) -------- if(active) { uint64_t hash5 = 0; if(active) { hash5 = 0; if(active) { hash5 = hash ( (hash5 + ((uint64_t)att7_ocustkey))); } } jpayl5 payl; payl.att6_oorderke = att6_oorderke; payl.att7_ocustkey = att7_ocustkey; hashInsertMulti ( jht5, jht5_payload, offs5, 150000, hash5, &(payl)); } loopVar += step; } } __global__ void krnl_customer4( int* iatt15_ccustkey, size_t* iatt16_cname_offset, char* iatt16_cname_char, size_t* iatt17_caddress_offset, char* iatt17_caddress_char, int* iatt18_cnationk, size_t* iatt19_cphone_offset, char* iatt19_cphone_char, float* iatt20_cacctbal, size_t* iatt22_ccomment_offset, char* iatt22_ccomment_char, multi_ht* jht5, jpayl5* jht5_payload, unique_ht<jpayl6>* jht6, unique_ht<jpayl9>* jht9) { int att15_ccustkey; str_t att16_cname; str_t att17_caddress; int att18_cnationk; str_t att19_cphone; float att20_cacctbal; str_t att22_ccomment; int att6_oorderke; int att7_ocustkey; int att2_nnationk; str_t att3_nname; int tid_customer1 = 0; unsigned loopVar = ((blockIdx.x * blockDim.x) + threadIdx.x); unsigned step = (blockDim.x * gridDim.x); unsigned flushPipeline = 0; int active = 0; while(!(flushPipeline)) { tid_customer1 = loopVar; active = (loopVar < 150000); // flush pipeline if no new elements flushPipeline = !(__ballot_sync(ALL_LANES,active)); if(active) { att15_ccustkey = iatt15_ccustkey[tid_customer1]; att16_cname = stringScan ( iatt16_cname_offset, iatt16_cname_char, tid_customer1); att17_caddress = stringScan ( iatt17_caddress_offset, iatt17_caddress_char, tid_customer1); att18_cnationk = iatt18_cnationk[tid_customer1]; att19_cphone = stringScan ( iatt19_cphone_offset, iatt19_cphone_char, tid_customer1); att20_cacctbal = iatt20_cacctbal[tid_customer1]; att22_ccomment = stringScan ( iatt22_ccomment_offset, iatt22_ccomment_char, tid_customer1); } // -------- hash join probe (opId: 5) -------- int matchEnd5 = 0; int matchOffset5 = 0; int matchStep5 = 1; int matchFound5 = 0; int probeActive5 = active; uint64_t hash5 = 0; if(probeActive5) { hash5 = 0; if(active) { hash5 = hash ( (hash5 + ((uint64_t)att15_ccustkey))); } probeActive5 = hashProbeMulti ( jht5, 150000, hash5, matchOffset5, matchEnd5); } active = probeActive5; while(__any_sync(ALL_LANES,active)) { probeActive5 = active; jpayl5 payl; if(probeActive5) { payl = jht5_payload[matchOffset5]; att6_oorderke = payl.att6_oorderke; att7_ocustkey = payl.att7_ocustkey; active &= ((att7_ocustkey == att15_ccustkey)); matchFound5 += active; } // -------- hash join probe (opId: 6) -------- uint64_t hash6 = 0; if(active) { hash6 = 0; if(active) { hash6 = hash ( (hash6 + ((uint64_t)att18_cnationk))); } } jpayl6* probepayl6; int numLookups6 = 0; if(active) { active = hashProbeUnique ( jht6, 50, hash6, numLookups6, &(probepayl6)); } int bucketFound6 = 0; int probeActive6 = active; while((probeActive6 && !(bucketFound6))) { jpayl6 jprobepayl6 = *(probepayl6); att2_nnationk = jprobepayl6.att2_nnationk; att3_nname = jprobepayl6.att3_nname; bucketFound6 = 1; bucketFound6 &= ((att2_nnationk == att18_cnationk)); if(!(bucketFound6)) { probeActive6 = hashProbeUnique ( jht6, 50, hash6, numLookups6, &(probepayl6)); } } active = bucketFound6; // -------- hash join build (opId: 9) -------- if(active) { jpayl9 payl9; payl9.att3_nname = att3_nname; payl9.att6_oorderke = att6_oorderke; payl9.att15_ccustkey = att15_ccustkey; payl9.att16_cname 
= att16_cname; payl9.att17_caddress = att17_caddress; payl9.att19_cphone = att19_cphone; payl9.att20_cacctbal = att20_cacctbal; payl9.att22_ccomment = att22_ccomment; uint64_t hash9; hash9 = 0; if(active) { hash9 = hash ( (hash9 + ((uint64_t)att6_oorderke))); } hashBuildUnique ( jht9, 300000, hash9, &(payl9)); } matchOffset5 += matchStep5; probeActive5 &= ((matchOffset5 < matchEnd5)); active = probeActive5; } loopVar += step; } } __global__ void krnl_lineitem7( int* iatt23_lorderke, float* iatt28_lextende, float* iatt29_ldiscoun, char* iatt31_lreturnf, unique_ht<jpayl9>* jht9, agg_ht<apayl11>* aht11, float* agg1) { int att23_lorderke; float att28_lextende; float att29_ldiscoun; char att31_lreturnf; str_t att3_nname; int att6_oorderke; int att15_ccustkey; str_t att16_cname; str_t att17_caddress; str_t att19_cphone; float att20_cacctbal; str_t att22_ccomment; float att39_rev; int tid_lineitem1 = 0; unsigned loopVar = ((blockIdx.x * blockDim.x) + threadIdx.x); unsigned step = (blockDim.x * gridDim.x); unsigned flushPipeline = 0; int active = 0; while(!(flushPipeline)) { tid_lineitem1 = loopVar; active = (loopVar < 6001215); // flush pipeline if no new elements flushPipeline = !(__ballot_sync(ALL_LANES,active)); if(active) { att23_lorderke = iatt23_lorderke[tid_lineitem1]; att28_lextende = iatt28_lextende[tid_lineitem1]; att29_ldiscoun = iatt29_ldiscoun[tid_lineitem1]; att31_lreturnf = iatt31_lreturnf[tid_lineitem1]; } // -------- selection (opId: 8) -------- if(active) { active = (att31_lreturnf == 'R'); } // -------- hash join probe (opId: 9) -------- uint64_t hash9 = 0; if(active) { hash9 = 0; if(active) { hash9 = hash ( (hash9 + ((uint64_t)att23_lorderke))); } } jpayl9* probepayl9; int numLookups9 = 0; if(active) { active = hashProbeUnique ( jht9, 300000, hash9, numLookups9, &(probepayl9)); } int bucketFound9 = 0; int probeActive9 = active; while((probeActive9 && !(bucketFound9))) { jpayl9 jprobepayl9 = *(probepayl9); att3_nname = jprobepayl9.att3_nname; att6_oorderke = jprobepayl9.att6_oorderke; att15_ccustkey = jprobepayl9.att15_ccustkey; att16_cname = jprobepayl9.att16_cname; att17_caddress = jprobepayl9.att17_caddress; att19_cphone = jprobepayl9.att19_cphone; att20_cacctbal = jprobepayl9.att20_cacctbal; att22_ccomment = jprobepayl9.att22_ccomment; bucketFound9 = 1; bucketFound9 &= ((att6_oorderke == att23_lorderke)); if(!(bucketFound9)) { probeActive9 = hashProbeUnique ( jht9, 300000, hash9, numLookups9, &(probepayl9)); } } active = bucketFound9; // -------- map (opId: 10) -------- if(active) { att39_rev = (att28_lextende * ((float)1.0f - att29_ldiscoun)); } // -------- aggregation (opId: 11) -------- int bucket = 0; if(active) { uint64_t hash11 = 0; hash11 = 0; if(active) { hash11 = hash ( (hash11 + ((uint64_t)att15_ccustkey))); } hash11 = hash ( (hash11 + stringHash ( att16_cname))); if(active) { hash11 = hash ( (hash11 + ((uint64_t)att20_cacctbal))); } hash11 = hash ( (hash11 + stringHash ( att19_cphone))); hash11 = hash ( (hash11 + stringHash ( att3_nname))); hash11 = hash ( (hash11 + stringHash ( att17_caddress))); hash11 = hash ( (hash11 + stringHash ( att22_ccomment))); apayl11 payl; payl.att15_ccustkey = att15_ccustkey; payl.att16_cname = att16_cname; payl.att20_cacctbal = att20_cacctbal; payl.att19_cphone = att19_cphone; payl.att3_nname = att3_nname; payl.att17_caddress = att17_caddress; payl.att22_ccomment = att22_ccomment; int bucketFound = 0; int numLookups = 0; while(!(bucketFound)) { bucket = hashAggregateGetBucket ( aht11, 138748, hash11, numLookups, &(payl)); apayl11 
probepayl = aht11[bucket].payload; bucketFound = 1; bucketFound &= ((payl.att15_ccustkey == probepayl.att15_ccustkey)); bucketFound &= (stringEquals ( payl.att16_cname, probepayl.att16_cname)); bucketFound &= ((payl.att20_cacctbal == probepayl.att20_cacctbal)); bucketFound &= (stringEquals ( payl.att19_cphone, probepayl.att19_cphone)); bucketFound &= (stringEquals ( payl.att3_nname, probepayl.att3_nname)); bucketFound &= (stringEquals ( payl.att17_caddress, probepayl.att17_caddress)); bucketFound &= (stringEquals ( payl.att22_ccomment, probepayl.att22_ccomment)); } } if(active) { atomicAdd(&(agg1[bucket]), ((float)att39_rev)); } loopVar += step; } } __global__ void krnl_aggregation11( agg_ht<apayl11>* aht11, float* agg1, int* nout_result, int* oatt15_ccustkey, str_offs* oatt16_cname_offset, char* iatt16_cname_char, float* oatt20_cacctbal, str_offs* oatt19_cphone_offset, char* iatt19_cphone_char, str_offs* oatt3_nname_offset, char* iatt3_nname_char, str_offs* oatt17_caddress_offset, char* iatt17_caddress_char, str_offs* oatt22_ccomment_offset, char* iatt22_ccomment_char, float* oatt1_revenue) { int att15_ccustkey; str_t att16_cname; float att20_cacctbal; str_t att19_cphone; str_t att3_nname; str_t att17_caddress; str_t att22_ccomment; float att1_revenue; unsigned warplane = (threadIdx.x % 32); unsigned prefixlanes = (0xffffffff >> (32 - warplane)); int tid_aggregation11 = 0; unsigned loopVar = ((blockIdx.x * blockDim.x) + threadIdx.x); unsigned step = (blockDim.x * gridDim.x); unsigned flushPipeline = 0; int active = 0; while(!(flushPipeline)) { tid_aggregation11 = loopVar; active = (loopVar < 138748); // flush pipeline if no new elements flushPipeline = !(__ballot_sync(ALL_LANES,active)); if(active) { } // -------- scan aggregation ht (opId: 11) -------- if(active) { active &= ((aht11[tid_aggregation11].lock.lock == OnceLock::LOCK_DONE)); } if(active) { apayl11 payl = aht11[tid_aggregation11].payload; att15_ccustkey = payl.att15_ccustkey; att16_cname = payl.att16_cname; att20_cacctbal = payl.att20_cacctbal; att19_cphone = payl.att19_cphone; att3_nname = payl.att3_nname; att17_caddress = payl.att17_caddress; att22_ccomment = payl.att22_ccomment; } if(active) { att1_revenue = agg1[tid_aggregation11]; } // -------- materialize (opId: 12) -------- int wp; int writeMask; int numProj; writeMask = __ballot_sync(ALL_LANES,active); numProj = __popc(writeMask); if((warplane == 0)) { wp = atomicAdd(nout_result, numProj); } wp = __shfl_sync(ALL_LANES,wp,0); wp = (wp + __popc((writeMask & prefixlanes))); if(active) { oatt15_ccustkey[wp] = att15_ccustkey; oatt16_cname_offset[wp] = toStringOffset ( iatt16_cname_char, att16_cname); oatt20_cacctbal[wp] = att20_cacctbal; oatt19_cphone_offset[wp] = toStringOffset ( iatt19_cphone_char, att19_cphone); oatt3_nname_offset[wp] = toStringOffset ( iatt3_nname_char, att3_nname); oatt17_caddress_offset[wp] = toStringOffset ( iatt17_caddress_char, att17_caddress); oatt22_ccomment_offset[wp] = toStringOffset ( iatt22_ccomment_char, att22_ccomment); oatt1_revenue[wp] = att1_revenue; } loopVar += step; } } int main() { int* iatt2_nnationk; iatt2_nnationk = ( int*) map_memory_file ( "mmdb/nation_n_nationkey" ); size_t* iatt3_nname_offset; iatt3_nname_offset = ( size_t*) map_memory_file ( "mmdb/nation_n_name_offset" ); char* iatt3_nname_char; iatt3_nname_char = ( char*) map_memory_file ( "mmdb/nation_n_name_char" ); int* iatt6_oorderke; iatt6_oorderke = ( int*) map_memory_file ( "mmdb/orders_o_orderkey" ); int* iatt7_ocustkey; iatt7_ocustkey = ( int*) map_memory_file ( 
"mmdb/orders_o_custkey" ); unsigned* iatt10_oorderda; iatt10_oorderda = ( unsigned*) map_memory_file ( "mmdb/orders_o_orderdate" ); int* iatt15_ccustkey; iatt15_ccustkey = ( int*) map_memory_file ( "mmdb/customer_c_custkey" ); size_t* iatt16_cname_offset; iatt16_cname_offset = ( size_t*) map_memory_file ( "mmdb/customer_c_name_offset" ); char* iatt16_cname_char; iatt16_cname_char = ( char*) map_memory_file ( "mmdb/customer_c_name_char" ); size_t* iatt17_caddress_offset; iatt17_caddress_offset = ( size_t*) map_memory_file ( "mmdb/customer_c_address_offset" ); char* iatt17_caddress_char; iatt17_caddress_char = ( char*) map_memory_file ( "mmdb/customer_c_address_char" ); int* iatt18_cnationk; iatt18_cnationk = ( int*) map_memory_file ( "mmdb/customer_c_nationkey" ); size_t* iatt19_cphone_offset; iatt19_cphone_offset = ( size_t*) map_memory_file ( "mmdb/customer_c_phone_offset" ); char* iatt19_cphone_char; iatt19_cphone_char = ( char*) map_memory_file ( "mmdb/customer_c_phone_char" ); float* iatt20_cacctbal; iatt20_cacctbal = ( float*) map_memory_file ( "mmdb/customer_c_acctbal" ); size_t* iatt22_ccomment_offset; iatt22_ccomment_offset = ( size_t*) map_memory_file ( "mmdb/customer_c_comment_offset" ); char* iatt22_ccomment_char; iatt22_ccomment_char = ( char*) map_memory_file ( "mmdb/customer_c_comment_char" ); int* iatt23_lorderke; iatt23_lorderke = ( int*) map_memory_file ( "mmdb/lineitem_l_orderkey" ); float* iatt28_lextende; iatt28_lextende = ( float*) map_memory_file ( "mmdb/lineitem_l_extendedprice" ); float* iatt29_ldiscoun; iatt29_ldiscoun = ( float*) map_memory_file ( "mmdb/lineitem_l_discount" ); char* iatt31_lreturnf; iatt31_lreturnf = ( char*) map_memory_file ( "mmdb/lineitem_l_returnflag" ); int nout_result; std::vector < int > oatt15_ccustkey(69374); std::vector < str_offs > oatt16_cname_offset(69374); std::vector < float > oatt20_cacctbal(69374); std::vector < str_offs > oatt19_cphone_offset(69374); std::vector < str_offs > oatt3_nname_offset(69374); std::vector < str_offs > oatt17_caddress_offset(69374); std::vector < str_offs > oatt22_ccomment_offset(69374); std::vector < float > oatt1_revenue(69374); // wake up gpu hipDeviceSynchronize(); { hipError_t err = hipGetLastError(); if(err != hipSuccess) { std::cerr << "Cuda Error in wake up gpu! 
" << hipGetErrorString( err ) << std::endl; ERROR("wake up gpu") } } int* d_iatt2_nnationk; hipMalloc((void**) &d_iatt2_nnationk, 25* sizeof(int) ); size_t* d_iatt3_nname_offset; hipMalloc((void**) &d_iatt3_nname_offset, (25 + 1)* sizeof(size_t) ); char* d_iatt3_nname_char; hipMalloc((void**) &d_iatt3_nname_char, 186* sizeof(char) ); int* d_iatt6_oorderke; hipMalloc((void**) &d_iatt6_oorderke, 1500000* sizeof(int) ); int* d_iatt7_ocustkey; hipMalloc((void**) &d_iatt7_ocustkey, 1500000* sizeof(int) ); unsigned* d_iatt10_oorderda; hipMalloc((void**) &d_iatt10_oorderda, 1500000* sizeof(unsigned) ); int* d_iatt15_ccustkey; hipMalloc((void**) &d_iatt15_ccustkey, 150000* sizeof(int) ); size_t* d_iatt16_cname_offset; hipMalloc((void**) &d_iatt16_cname_offset, (150000 + 1)* sizeof(size_t) ); char* d_iatt16_cname_char; hipMalloc((void**) &d_iatt16_cname_char, 2700009* sizeof(char) ); size_t* d_iatt17_caddress_offset; hipMalloc((void**) &d_iatt17_caddress_offset, (150000 + 1)* sizeof(size_t) ); char* d_iatt17_caddress_char; hipMalloc((void**) &d_iatt17_caddress_char, 3753296* sizeof(char) ); int* d_iatt18_cnationk; hipMalloc((void**) &d_iatt18_cnationk, 150000* sizeof(int) ); size_t* d_iatt19_cphone_offset; hipMalloc((void**) &d_iatt19_cphone_offset, (150000 + 1)* sizeof(size_t) ); char* d_iatt19_cphone_char; hipMalloc((void**) &d_iatt19_cphone_char, 2250009* sizeof(char) ); float* d_iatt20_cacctbal; hipMalloc((void**) &d_iatt20_cacctbal, 150000* sizeof(float) ); size_t* d_iatt22_ccomment_offset; hipMalloc((void**) &d_iatt22_ccomment_offset, (150000 + 1)* sizeof(size_t) ); char* d_iatt22_ccomment_char; hipMalloc((void**) &d_iatt22_ccomment_char, 10836339* sizeof(char) ); int* d_iatt23_lorderke; hipMalloc((void**) &d_iatt23_lorderke, 6001215* sizeof(int) ); float* d_iatt28_lextende; hipMalloc((void**) &d_iatt28_lextende, 6001215* sizeof(float) ); float* d_iatt29_ldiscoun; hipMalloc((void**) &d_iatt29_ldiscoun, 6001215* sizeof(float) ); char* d_iatt31_lreturnf; hipMalloc((void**) &d_iatt31_lreturnf, 6001215* sizeof(char) ); int* d_nout_result; hipMalloc((void**) &d_nout_result, 1* sizeof(int) ); int* d_oatt15_ccustkey; hipMalloc((void**) &d_oatt15_ccustkey, 69374* sizeof(int) ); str_offs* d_oatt16_cname_offset; hipMalloc((void**) &d_oatt16_cname_offset, 69374* sizeof(str_offs) ); float* d_oatt20_cacctbal; hipMalloc((void**) &d_oatt20_cacctbal, 69374* sizeof(float) ); str_offs* d_oatt19_cphone_offset; hipMalloc((void**) &d_oatt19_cphone_offset, 69374* sizeof(str_offs) ); str_offs* d_oatt3_nname_offset; hipMalloc((void**) &d_oatt3_nname_offset, 69374* sizeof(str_offs) ); str_offs* d_oatt17_caddress_offset; hipMalloc((void**) &d_oatt17_caddress_offset, 69374* sizeof(str_offs) ); str_offs* d_oatt22_ccomment_offset; hipMalloc((void**) &d_oatt22_ccomment_offset, 69374* sizeof(str_offs) ); float* d_oatt1_revenue; hipMalloc((void**) &d_oatt1_revenue, 69374* sizeof(float) ); hipDeviceSynchronize(); { hipError_t err = hipGetLastError(); if(err != hipSuccess) { std::cerr << "Cuda Error in cuda malloc! 
" << hipGetErrorString( err ) << std::endl; ERROR("cuda malloc") } } // show memory usage of GPU { size_t free_byte ; size_t total_byte ; hipError_t cuda_status = hipMemGetInfo( &free_byte, &total_byte ) ; if ( hipSuccess != cuda_status ) { printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status) ); exit(1); } double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; fprintf(stderr, "Memory %.1f / %.1f GB\n", used_db/(1024*1024*1024), total_db/(1024*1024*1024) ); fflush(stdout); } unique_ht<jpayl6>* d_jht6; hipMalloc((void**) &d_jht6, 50* sizeof(unique_ht<jpayl6>) ); { int gridsize=100; int blocksize=32; hipLaunchKernelGGL(( initUniqueHT), dim3(gridsize), dim3(blocksize), 0, 0, d_jht6, 50); } multi_ht* d_jht5; hipMalloc((void**) &d_jht5, 150000* sizeof(multi_ht) ); jpayl5* d_jht5_payload; hipMalloc((void**) &d_jht5_payload, 150000* sizeof(jpayl5) ); { int gridsize=100; int blocksize=32; hipLaunchKernelGGL(( initMultiHT), dim3(gridsize), dim3(blocksize), 0, 0, d_jht5, 150000); } int* d_offs5; hipMalloc((void**) &d_offs5, 1* sizeof(int) ); { int gridsize=100; int blocksize=32; hipLaunchKernelGGL(( initArray), dim3(gridsize), dim3(blocksize), 0, 0, d_offs5, 0, 1); } unique_ht<jpayl9>* d_jht9; hipMalloc((void**) &d_jht9, 300000* sizeof(unique_ht<jpayl9>) ); { int gridsize=100; int blocksize=32; hipLaunchKernelGGL(( initUniqueHT), dim3(gridsize), dim3(blocksize), 0, 0, d_jht9, 300000); } agg_ht<apayl11>* d_aht11; hipMalloc((void**) &d_aht11, 138748* sizeof(agg_ht<apayl11>) ); { int gridsize=100; int blocksize=32; hipLaunchKernelGGL(( initAggHT), dim3(gridsize), dim3(blocksize), 0, 0, d_aht11, 138748); } float* d_agg1; hipMalloc((void**) &d_agg1, 138748* sizeof(float) ); { int gridsize=100; int blocksize=32; hipLaunchKernelGGL(( initArray), dim3(gridsize), dim3(blocksize), 0, 0, d_agg1, 0.0f, 138748); } { int gridsize=100; int blocksize=32; hipLaunchKernelGGL(( initArray), dim3(gridsize), dim3(blocksize), 0, 0, d_nout_result, 0, 1); } hipDeviceSynchronize(); { hipError_t err = hipGetLastError(); if(err != hipSuccess) { std::cerr << "Cuda Error in cuda mallocHT! 
" << hipGetErrorString( err ) << std::endl; ERROR("cuda mallocHT") } } // show memory usage of GPU { size_t free_byte ; size_t total_byte ; hipError_t cuda_status = hipMemGetInfo( &free_byte, &total_byte ) ; if ( hipSuccess != cuda_status ) { printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status) ); exit(1); } double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; fprintf(stderr, "Memory %.1f / %.1f GB\n", used_db/(1024*1024*1024), total_db/(1024*1024*1024) ); fflush(stdout); } hipMemcpy( d_iatt2_nnationk, iatt2_nnationk, 25 * sizeof(int), hipMemcpyHostToDevice); hipMemcpy( d_iatt3_nname_offset, iatt3_nname_offset, (25 + 1) * sizeof(size_t), hipMemcpyHostToDevice); hipMemcpy( d_iatt3_nname_char, iatt3_nname_char, 186 * sizeof(char), hipMemcpyHostToDevice); hipMemcpy( d_iatt6_oorderke, iatt6_oorderke, 1500000 * sizeof(int), hipMemcpyHostToDevice); hipMemcpy( d_iatt7_ocustkey, iatt7_ocustkey, 1500000 * sizeof(int), hipMemcpyHostToDevice); hipMemcpy( d_iatt10_oorderda, iatt10_oorderda, 1500000 * sizeof(unsigned), hipMemcpyHostToDevice); hipMemcpy( d_iatt15_ccustkey, iatt15_ccustkey, 150000 * sizeof(int), hipMemcpyHostToDevice); hipMemcpy( d_iatt16_cname_offset, iatt16_cname_offset, (150000 + 1) * sizeof(size_t), hipMemcpyHostToDevice); hipMemcpy( d_iatt16_cname_char, iatt16_cname_char, 2700009 * sizeof(char), hipMemcpyHostToDevice); hipMemcpy( d_iatt17_caddress_offset, iatt17_caddress_offset, (150000 + 1) * sizeof(size_t), hipMemcpyHostToDevice); hipMemcpy( d_iatt17_caddress_char, iatt17_caddress_char, 3753296 * sizeof(char), hipMemcpyHostToDevice); hipMemcpy( d_iatt18_cnationk, iatt18_cnationk, 150000 * sizeof(int), hipMemcpyHostToDevice); hipMemcpy( d_iatt19_cphone_offset, iatt19_cphone_offset, (150000 + 1) * sizeof(size_t), hipMemcpyHostToDevice); hipMemcpy( d_iatt19_cphone_char, iatt19_cphone_char, 2250009 * sizeof(char), hipMemcpyHostToDevice); hipMemcpy( d_iatt20_cacctbal, iatt20_cacctbal, 150000 * sizeof(float), hipMemcpyHostToDevice); hipMemcpy( d_iatt22_ccomment_offset, iatt22_ccomment_offset, (150000 + 1) * sizeof(size_t), hipMemcpyHostToDevice); hipMemcpy( d_iatt22_ccomment_char, iatt22_ccomment_char, 10836339 * sizeof(char), hipMemcpyHostToDevice); hipMemcpy( d_iatt23_lorderke, iatt23_lorderke, 6001215 * sizeof(int), hipMemcpyHostToDevice); hipMemcpy( d_iatt28_lextende, iatt28_lextende, 6001215 * sizeof(float), hipMemcpyHostToDevice); hipMemcpy( d_iatt29_ldiscoun, iatt29_ldiscoun, 6001215 * sizeof(float), hipMemcpyHostToDevice); hipMemcpy( d_iatt31_lreturnf, iatt31_lreturnf, 6001215 * sizeof(char), hipMemcpyHostToDevice); hipDeviceSynchronize(); { hipError_t err = hipGetLastError(); if(err != hipSuccess) { std::cerr << "Cuda Error in cuda memcpy in! " << hipGetErrorString( err ) << std::endl; ERROR("cuda memcpy in") } } std::clock_t start_totalKernelTime0 = std::clock(); std::clock_t start_krnl_nation11 = std::clock(); { int gridsize=100; int blocksize=32; hipLaunchKernelGGL(( krnl_nation1), dim3(gridsize), dim3(blocksize), 0, 0, d_iatt2_nnationk, d_iatt3_nname_offset, d_iatt3_nname_char, d_jht6); } hipDeviceSynchronize(); std::clock_t stop_krnl_nation11 = std::clock(); { hipError_t err = hipGetLastError(); if(err != hipSuccess) { std::cerr << "Cuda Error in krnl_nation1! 
" << hipGetErrorString( err ) << std::endl; ERROR("krnl_nation1") } } std::clock_t start_krnl_orders22 = std::clock(); { int gridsize=100; int blocksize=32; hipLaunchKernelGGL(( krnl_orders2), dim3(gridsize), dim3(blocksize), 0, 0, d_iatt6_oorderke, d_iatt7_ocustkey, d_iatt10_oorderda, d_jht5, d_jht5_payload); } hipDeviceSynchronize(); std::clock_t stop_krnl_orders22 = std::clock(); { hipError_t err = hipGetLastError(); if(err != hipSuccess) { std::cerr << "Cuda Error in krnl_orders2! " << hipGetErrorString( err ) << std::endl; ERROR("krnl_orders2") } } std::clock_t start_scanMultiHT3 = std::clock(); { int gridsize=100; int blocksize=32; hipLaunchKernelGGL(( scanMultiHT), dim3(gridsize), dim3(blocksize), 0, 0, d_jht5, 150000, d_offs5); } hipDeviceSynchronize(); std::clock_t stop_scanMultiHT3 = std::clock(); { hipError_t err = hipGetLastError(); if(err != hipSuccess) { std::cerr << "Cuda Error in scanMultiHT! " << hipGetErrorString( err ) << std::endl; ERROR("scanMultiHT") } } std::clock_t start_krnl_orders2_ins4 = std::clock(); { int gridsize=100; int blocksize=32; hipLaunchKernelGGL(( krnl_orders2_ins), dim3(gridsize), dim3(blocksize), 0, 0, d_iatt6_oorderke, d_iatt7_ocustkey, d_iatt10_oorderda, d_jht5, d_jht5_payload, d_offs5); } hipDeviceSynchronize(); std::clock_t stop_krnl_orders2_ins4 = std::clock(); { hipError_t err = hipGetLastError(); if(err != hipSuccess) { std::cerr << "Cuda Error in krnl_orders2_ins! " << hipGetErrorString( err ) << std::endl; ERROR("krnl_orders2_ins") } } std::clock_t start_krnl_customer45 = std::clock(); { int gridsize=100; int blocksize=32; hipLaunchKernelGGL(( krnl_customer4), dim3(gridsize), dim3(blocksize), 0, 0, d_iatt15_ccustkey, d_iatt16_cname_offset, d_iatt16_cname_char, d_iatt17_caddress_offset, d_iatt17_caddress_char, d_iatt18_cnationk, d_iatt19_cphone_offset, d_iatt19_cphone_char, d_iatt20_cacctbal, d_iatt22_ccomment_offset, d_iatt22_ccomment_char, d_jht5, d_jht5_payload, d_jht6, d_jht9); } hipDeviceSynchronize(); std::clock_t stop_krnl_customer45 = std::clock(); { hipError_t err = hipGetLastError(); if(err != hipSuccess) { std::cerr << "Cuda Error in krnl_customer4! " << hipGetErrorString( err ) << std::endl; ERROR("krnl_customer4") } } std::clock_t start_krnl_lineitem76 = std::clock(); { int gridsize=100; int blocksize=32; hipLaunchKernelGGL(( krnl_lineitem7), dim3(gridsize), dim3(blocksize), 0, 0, d_iatt23_lorderke, d_iatt28_lextende, d_iatt29_ldiscoun, d_iatt31_lreturnf, d_jht9, d_aht11, d_agg1); } hipDeviceSynchronize(); std::clock_t stop_krnl_lineitem76 = std::clock(); { hipError_t err = hipGetLastError(); if(err != hipSuccess) { std::cerr << "Cuda Error in krnl_lineitem7! " << hipGetErrorString( err ) << std::endl; ERROR("krnl_lineitem7") } } std::clock_t start_krnl_aggregation117 = std::clock(); { int gridsize=100; int blocksize=32; hipLaunchKernelGGL(( krnl_aggregation11), dim3(gridsize), dim3(blocksize), 0, 0, d_aht11, d_agg1, d_nout_result, d_oatt15_ccustkey, d_oatt16_cname_offset, d_iatt16_cname_char, d_oatt20_cacctbal, d_oatt19_cphone_offset, d_iatt19_cphone_char, d_oatt3_nname_offset, d_iatt3_nname_char, d_oatt17_caddress_offset, d_iatt17_caddress_char, d_oatt22_ccomment_offset, d_iatt22_ccomment_char, d_oatt1_revenue); } hipDeviceSynchronize(); std::clock_t stop_krnl_aggregation117 = std::clock(); { hipError_t err = hipGetLastError(); if(err != hipSuccess) { std::cerr << "Cuda Error in krnl_aggregation11! 
" << hipGetErrorString( err ) << std::endl; ERROR("krnl_aggregation11") } } std::clock_t stop_totalKernelTime0 = std::clock(); hipMemcpy( &nout_result, d_nout_result, 1 * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy( oatt15_ccustkey.data(), d_oatt15_ccustkey, 69374 * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy( oatt16_cname_offset.data(), d_oatt16_cname_offset, 69374 * sizeof(str_offs), hipMemcpyDeviceToHost); hipMemcpy( oatt20_cacctbal.data(), d_oatt20_cacctbal, 69374 * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy( oatt19_cphone_offset.data(), d_oatt19_cphone_offset, 69374 * sizeof(str_offs), hipMemcpyDeviceToHost); hipMemcpy( oatt3_nname_offset.data(), d_oatt3_nname_offset, 69374 * sizeof(str_offs), hipMemcpyDeviceToHost); hipMemcpy( oatt17_caddress_offset.data(), d_oatt17_caddress_offset, 69374 * sizeof(str_offs), hipMemcpyDeviceToHost); hipMemcpy( oatt22_ccomment_offset.data(), d_oatt22_ccomment_offset, 69374 * sizeof(str_offs), hipMemcpyDeviceToHost); hipMemcpy( oatt1_revenue.data(), d_oatt1_revenue, 69374 * sizeof(float), hipMemcpyDeviceToHost); hipDeviceSynchronize(); { hipError_t err = hipGetLastError(); if(err != hipSuccess) { std::cerr << "Cuda Error in cuda memcpy out! " << hipGetErrorString( err ) << std::endl; ERROR("cuda memcpy out") } } hipFree( d_iatt2_nnationk); hipFree( d_iatt3_nname_offset); hipFree( d_iatt3_nname_char); hipFree( d_jht6); hipFree( d_iatt6_oorderke); hipFree( d_iatt7_ocustkey); hipFree( d_iatt10_oorderda); hipFree( d_jht5); hipFree( d_jht5_payload); hipFree( d_offs5); hipFree( d_iatt15_ccustkey); hipFree( d_iatt16_cname_offset); hipFree( d_iatt16_cname_char); hipFree( d_iatt17_caddress_offset); hipFree( d_iatt17_caddress_char); hipFree( d_iatt18_cnationk); hipFree( d_iatt19_cphone_offset); hipFree( d_iatt19_cphone_char); hipFree( d_iatt20_cacctbal); hipFree( d_iatt22_ccomment_offset); hipFree( d_iatt22_ccomment_char); hipFree( d_jht9); hipFree( d_iatt23_lorderke); hipFree( d_iatt28_lextende); hipFree( d_iatt29_ldiscoun); hipFree( d_iatt31_lreturnf); hipFree( d_aht11); hipFree( d_agg1); hipFree( d_nout_result); hipFree( d_oatt15_ccustkey); hipFree( d_oatt16_cname_offset); hipFree( d_oatt20_cacctbal); hipFree( d_oatt19_cphone_offset); hipFree( d_oatt3_nname_offset); hipFree( d_oatt17_caddress_offset); hipFree( d_oatt22_ccomment_offset); hipFree( d_oatt1_revenue); hipDeviceSynchronize(); { hipError_t err = hipGetLastError(); if(err != hipSuccess) { std::cerr << "Cuda Error in cuda free! " << hipGetErrorString( err ) << std::endl; ERROR("cuda free") } } std::clock_t start_finish8 = std::clock(); printf("\nResult: %i tuples\n", nout_result); if((nout_result > 69374)) { ERROR("Index out of range. 
Output size larger than allocated with expected result number.") } for ( int pv = 0; ((pv < 10) && (pv < nout_result)); pv += 1) { printf("c_custkey: "); printf("%8i", oatt15_ccustkey[pv]); printf(" "); printf("c_name: "); stringPrint ( iatt16_cname_char, oatt16_cname_offset[pv]); printf(" "); printf("c_acctbal: "); printf("%15.2f", oatt20_cacctbal[pv]); printf(" "); printf("c_phone: "); stringPrint ( iatt19_cphone_char, oatt19_cphone_offset[pv]); printf(" "); printf("n_name: "); stringPrint ( iatt3_nname_char, oatt3_nname_offset[pv]); printf(" "); printf("c_address: "); stringPrint ( iatt17_caddress_char, oatt17_caddress_offset[pv]); printf(" "); printf("c_comment: "); stringPrint ( iatt22_ccomment_char, oatt22_ccomment_offset[pv]); printf(" "); printf("revenue: "); printf("%15.2f", oatt1_revenue[pv]); printf(" "); printf("\n"); } if((nout_result > 10)) { printf("[...]\n"); } printf("\n"); std::clock_t stop_finish8 = std::clock(); printf("<timing>\n"); printf ( "%32s: %6.1f ms\n", "krnl_nation1", (stop_krnl_nation11 - start_krnl_nation11) / (double) (CLOCKS_PER_SEC / 1000) ); printf ( "%32s: %6.1f ms\n", "krnl_orders2", (stop_krnl_orders22 - start_krnl_orders22) / (double) (CLOCKS_PER_SEC / 1000) ); printf ( "%32s: %6.1f ms\n", "scanMultiHT", (stop_scanMultiHT3 - start_scanMultiHT3) / (double) (CLOCKS_PER_SEC / 1000) ); printf ( "%32s: %6.1f ms\n", "krnl_orders2_ins", (stop_krnl_orders2_ins4 - start_krnl_orders2_ins4) / (double) (CLOCKS_PER_SEC / 1000) ); printf ( "%32s: %6.1f ms\n", "krnl_customer4", (stop_krnl_customer45 - start_krnl_customer45) / (double) (CLOCKS_PER_SEC / 1000) ); printf ( "%32s: %6.1f ms\n", "krnl_lineitem7", (stop_krnl_lineitem76 - start_krnl_lineitem76) / (double) (CLOCKS_PER_SEC / 1000) ); printf ( "%32s: %6.1f ms\n", "krnl_aggregation11", (stop_krnl_aggregation117 - start_krnl_aggregation117) / (double) (CLOCKS_PER_SEC / 1000) ); printf ( "%32s: %6.1f ms\n", "finish", (stop_finish8 - start_finish8) / (double) (CLOCKS_PER_SEC / 1000) ); printf ( "%32s: %6.1f ms\n", "totalKernelTime", (stop_totalKernelTime0 - start_totalKernelTime0) / (double) (CLOCKS_PER_SEC / 1000) ); printf("</timing>\n"); }
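The pair in this row is a convenient side-by-side reference for the launch-syntax rewrite that hipify performs: every CUDA triple-chevron launch in the .cu column (for example krnl_nation1<<<gridsize, blocksize>>>(...)) appears in the .hip column as hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedMemBytes, stream, args...). A minimal, self-contained sketch of that mapping follows; the scale kernel and sizes are illustrative only and are not taken from the generated query code above.

#include <hip/hip_runtime.h>
#include <cstdio>

// Illustrative kernel (not part of the generated query code in this row).
__global__ void scale(float* x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main() {
    const int n = 1024;
    float* d_x;
    hipMalloc((void**) &d_x, n * sizeof(float));   // CUDA: cudaMalloc
    int gridsize = 100;                            // same launch shape the generated code uses
    int blocksize = 32;
    // CUDA form:  scale<<<gridsize, blocksize>>>(d_x, 2.0f, n);
    // HIP form emitted by hipify: kernel, grid, block, dynamic shared mem bytes, stream, then args.
    hipLaunchKernelGGL(scale, dim3(gridsize), dim3(blocksize), 0, 0, d_x, 2.0f, n);
    hipDeviceSynchronize();                        // CUDA: cudaDeviceSynchronize
    printf("launch: %s\n", hipGetErrorString(hipGetLastError()));
    hipFree(d_x);                                  // CUDA: cudaFree
    return 0;
}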
2f9e0ff8b5f2e265c1a986cfec2d4f11cc5e0fb0.cu
#include <list> #include <unordered_map> #include <vector> #include <iostream> #include <ctime> #include <limits.h> #include <float.h> #include "../dogqc/include/csv.h" #include "../dogqc/include/util.h" #include "../dogqc/include/mappedmalloc.h" #include "../dogqc/include/util.cuh" #include "../dogqc/include/hashing.cuh" struct jpayl6 { int att2_nnationk; str_t att3_nname; }; struct jpayl5 { int att6_oorderke; int att7_ocustkey; }; struct jpayl9 { str_t att3_nname; int att6_oorderke; int att15_ccustkey; str_t att16_cname; str_t att17_caddress; str_t att19_cphone; float att20_cacctbal; str_t att22_ccomment; }; struct apayl11 { int att15_ccustkey; str_t att16_cname; float att20_cacctbal; str_t att19_cphone; str_t att3_nname; str_t att17_caddress; str_t att22_ccomment; }; __global__ void krnl_nation1( int* iatt2_nnationk, size_t* iatt3_nname_offset, char* iatt3_nname_char, unique_ht<jpayl6>* jht6) { int att2_nnationk; str_t att3_nname; int tid_nation1 = 0; unsigned loopVar = ((blockIdx.x * blockDim.x) + threadIdx.x); unsigned step = (blockDim.x * gridDim.x); unsigned flushPipeline = 0; int active = 0; while(!(flushPipeline)) { tid_nation1 = loopVar; active = (loopVar < 25); // flush pipeline if no new elements flushPipeline = !(__ballot_sync(ALL_LANES,active)); if(active) { att2_nnationk = iatt2_nnationk[tid_nation1]; att3_nname = stringScan ( iatt3_nname_offset, iatt3_nname_char, tid_nation1); } // -------- hash join build (opId: 6) -------- if(active) { jpayl6 payl6; payl6.att2_nnationk = att2_nnationk; payl6.att3_nname = att3_nname; uint64_t hash6; hash6 = 0; if(active) { hash6 = hash ( (hash6 + ((uint64_t)att2_nnationk))); } hashBuildUnique ( jht6, 50, hash6, &(payl6)); } loopVar += step; } } __global__ void krnl_orders2( int* iatt6_oorderke, int* iatt7_ocustkey, unsigned* iatt10_oorderda, multi_ht* jht5, jpayl5* jht5_payload) { int att6_oorderke; int att7_ocustkey; unsigned att10_oorderda; int tid_orders1 = 0; unsigned loopVar = ((blockIdx.x * blockDim.x) + threadIdx.x); unsigned step = (blockDim.x * gridDim.x); unsigned flushPipeline = 0; int active = 0; while(!(flushPipeline)) { tid_orders1 = loopVar; active = (loopVar < 1500000); // flush pipeline if no new elements flushPipeline = !(__ballot_sync(ALL_LANES,active)); if(active) { att6_oorderke = iatt6_oorderke[tid_orders1]; att7_ocustkey = iatt7_ocustkey[tid_orders1]; att10_oorderda = iatt10_oorderda[tid_orders1]; } // -------- selection (opId: 3) -------- if(active) { active = ((att10_oorderda >= 19931001) && (att10_oorderda < 19940101)); } // -------- hash join build (opId: 5) -------- if(active) { uint64_t hash5 = 0; if(active) { hash5 = 0; if(active) { hash5 = hash ( (hash5 + ((uint64_t)att7_ocustkey))); } } hashCountMulti ( jht5, 150000, hash5); } loopVar += step; } } __global__ void krnl_orders2_ins( int* iatt6_oorderke, int* iatt7_ocustkey, unsigned* iatt10_oorderda, multi_ht* jht5, jpayl5* jht5_payload, int* offs5) { int att6_oorderke; int att7_ocustkey; unsigned att10_oorderda; int tid_orders1 = 0; unsigned loopVar = ((blockIdx.x * blockDim.x) + threadIdx.x); unsigned step = (blockDim.x * gridDim.x); unsigned flushPipeline = 0; int active = 0; while(!(flushPipeline)) { tid_orders1 = loopVar; active = (loopVar < 1500000); // flush pipeline if no new elements flushPipeline = !(__ballot_sync(ALL_LANES,active)); if(active) { att6_oorderke = iatt6_oorderke[tid_orders1]; att7_ocustkey = iatt7_ocustkey[tid_orders1]; att10_oorderda = iatt10_oorderda[tid_orders1]; } // -------- selection (opId: 3) -------- if(active) { active = 
((att10_oorderda >= 19931001) && (att10_oorderda < 19940101)); } // -------- hash join build (opId: 5) -------- if(active) { uint64_t hash5 = 0; if(active) { hash5 = 0; if(active) { hash5 = hash ( (hash5 + ((uint64_t)att7_ocustkey))); } } jpayl5 payl; payl.att6_oorderke = att6_oorderke; payl.att7_ocustkey = att7_ocustkey; hashInsertMulti ( jht5, jht5_payload, offs5, 150000, hash5, &(payl)); } loopVar += step; } } __global__ void krnl_customer4( int* iatt15_ccustkey, size_t* iatt16_cname_offset, char* iatt16_cname_char, size_t* iatt17_caddress_offset, char* iatt17_caddress_char, int* iatt18_cnationk, size_t* iatt19_cphone_offset, char* iatt19_cphone_char, float* iatt20_cacctbal, size_t* iatt22_ccomment_offset, char* iatt22_ccomment_char, multi_ht* jht5, jpayl5* jht5_payload, unique_ht<jpayl6>* jht6, unique_ht<jpayl9>* jht9) { int att15_ccustkey; str_t att16_cname; str_t att17_caddress; int att18_cnationk; str_t att19_cphone; float att20_cacctbal; str_t att22_ccomment; int att6_oorderke; int att7_ocustkey; int att2_nnationk; str_t att3_nname; int tid_customer1 = 0; unsigned loopVar = ((blockIdx.x * blockDim.x) + threadIdx.x); unsigned step = (blockDim.x * gridDim.x); unsigned flushPipeline = 0; int active = 0; while(!(flushPipeline)) { tid_customer1 = loopVar; active = (loopVar < 150000); // flush pipeline if no new elements flushPipeline = !(__ballot_sync(ALL_LANES,active)); if(active) { att15_ccustkey = iatt15_ccustkey[tid_customer1]; att16_cname = stringScan ( iatt16_cname_offset, iatt16_cname_char, tid_customer1); att17_caddress = stringScan ( iatt17_caddress_offset, iatt17_caddress_char, tid_customer1); att18_cnationk = iatt18_cnationk[tid_customer1]; att19_cphone = stringScan ( iatt19_cphone_offset, iatt19_cphone_char, tid_customer1); att20_cacctbal = iatt20_cacctbal[tid_customer1]; att22_ccomment = stringScan ( iatt22_ccomment_offset, iatt22_ccomment_char, tid_customer1); } // -------- hash join probe (opId: 5) -------- int matchEnd5 = 0; int matchOffset5 = 0; int matchStep5 = 1; int matchFound5 = 0; int probeActive5 = active; uint64_t hash5 = 0; if(probeActive5) { hash5 = 0; if(active) { hash5 = hash ( (hash5 + ((uint64_t)att15_ccustkey))); } probeActive5 = hashProbeMulti ( jht5, 150000, hash5, matchOffset5, matchEnd5); } active = probeActive5; while(__any_sync(ALL_LANES,active)) { probeActive5 = active; jpayl5 payl; if(probeActive5) { payl = jht5_payload[matchOffset5]; att6_oorderke = payl.att6_oorderke; att7_ocustkey = payl.att7_ocustkey; active &= ((att7_ocustkey == att15_ccustkey)); matchFound5 += active; } // -------- hash join probe (opId: 6) -------- uint64_t hash6 = 0; if(active) { hash6 = 0; if(active) { hash6 = hash ( (hash6 + ((uint64_t)att18_cnationk))); } } jpayl6* probepayl6; int numLookups6 = 0; if(active) { active = hashProbeUnique ( jht6, 50, hash6, numLookups6, &(probepayl6)); } int bucketFound6 = 0; int probeActive6 = active; while((probeActive6 && !(bucketFound6))) { jpayl6 jprobepayl6 = *(probepayl6); att2_nnationk = jprobepayl6.att2_nnationk; att3_nname = jprobepayl6.att3_nname; bucketFound6 = 1; bucketFound6 &= ((att2_nnationk == att18_cnationk)); if(!(bucketFound6)) { probeActive6 = hashProbeUnique ( jht6, 50, hash6, numLookups6, &(probepayl6)); } } active = bucketFound6; // -------- hash join build (opId: 9) -------- if(active) { jpayl9 payl9; payl9.att3_nname = att3_nname; payl9.att6_oorderke = att6_oorderke; payl9.att15_ccustkey = att15_ccustkey; payl9.att16_cname = att16_cname; payl9.att17_caddress = att17_caddress; payl9.att19_cphone = att19_cphone; 
payl9.att20_cacctbal = att20_cacctbal; payl9.att22_ccomment = att22_ccomment; uint64_t hash9; hash9 = 0; if(active) { hash9 = hash ( (hash9 + ((uint64_t)att6_oorderke))); } hashBuildUnique ( jht9, 300000, hash9, &(payl9)); } matchOffset5 += matchStep5; probeActive5 &= ((matchOffset5 < matchEnd5)); active = probeActive5; } loopVar += step; } } __global__ void krnl_lineitem7( int* iatt23_lorderke, float* iatt28_lextende, float* iatt29_ldiscoun, char* iatt31_lreturnf, unique_ht<jpayl9>* jht9, agg_ht<apayl11>* aht11, float* agg1) { int att23_lorderke; float att28_lextende; float att29_ldiscoun; char att31_lreturnf; str_t att3_nname; int att6_oorderke; int att15_ccustkey; str_t att16_cname; str_t att17_caddress; str_t att19_cphone; float att20_cacctbal; str_t att22_ccomment; float att39_rev; int tid_lineitem1 = 0; unsigned loopVar = ((blockIdx.x * blockDim.x) + threadIdx.x); unsigned step = (blockDim.x * gridDim.x); unsigned flushPipeline = 0; int active = 0; while(!(flushPipeline)) { tid_lineitem1 = loopVar; active = (loopVar < 6001215); // flush pipeline if no new elements flushPipeline = !(__ballot_sync(ALL_LANES,active)); if(active) { att23_lorderke = iatt23_lorderke[tid_lineitem1]; att28_lextende = iatt28_lextende[tid_lineitem1]; att29_ldiscoun = iatt29_ldiscoun[tid_lineitem1]; att31_lreturnf = iatt31_lreturnf[tid_lineitem1]; } // -------- selection (opId: 8) -------- if(active) { active = (att31_lreturnf == 'R'); } // -------- hash join probe (opId: 9) -------- uint64_t hash9 = 0; if(active) { hash9 = 0; if(active) { hash9 = hash ( (hash9 + ((uint64_t)att23_lorderke))); } } jpayl9* probepayl9; int numLookups9 = 0; if(active) { active = hashProbeUnique ( jht9, 300000, hash9, numLookups9, &(probepayl9)); } int bucketFound9 = 0; int probeActive9 = active; while((probeActive9 && !(bucketFound9))) { jpayl9 jprobepayl9 = *(probepayl9); att3_nname = jprobepayl9.att3_nname; att6_oorderke = jprobepayl9.att6_oorderke; att15_ccustkey = jprobepayl9.att15_ccustkey; att16_cname = jprobepayl9.att16_cname; att17_caddress = jprobepayl9.att17_caddress; att19_cphone = jprobepayl9.att19_cphone; att20_cacctbal = jprobepayl9.att20_cacctbal; att22_ccomment = jprobepayl9.att22_ccomment; bucketFound9 = 1; bucketFound9 &= ((att6_oorderke == att23_lorderke)); if(!(bucketFound9)) { probeActive9 = hashProbeUnique ( jht9, 300000, hash9, numLookups9, &(probepayl9)); } } active = bucketFound9; // -------- map (opId: 10) -------- if(active) { att39_rev = (att28_lextende * ((float)1.0f - att29_ldiscoun)); } // -------- aggregation (opId: 11) -------- int bucket = 0; if(active) { uint64_t hash11 = 0; hash11 = 0; if(active) { hash11 = hash ( (hash11 + ((uint64_t)att15_ccustkey))); } hash11 = hash ( (hash11 + stringHash ( att16_cname))); if(active) { hash11 = hash ( (hash11 + ((uint64_t)att20_cacctbal))); } hash11 = hash ( (hash11 + stringHash ( att19_cphone))); hash11 = hash ( (hash11 + stringHash ( att3_nname))); hash11 = hash ( (hash11 + stringHash ( att17_caddress))); hash11 = hash ( (hash11 + stringHash ( att22_ccomment))); apayl11 payl; payl.att15_ccustkey = att15_ccustkey; payl.att16_cname = att16_cname; payl.att20_cacctbal = att20_cacctbal; payl.att19_cphone = att19_cphone; payl.att3_nname = att3_nname; payl.att17_caddress = att17_caddress; payl.att22_ccomment = att22_ccomment; int bucketFound = 0; int numLookups = 0; while(!(bucketFound)) { bucket = hashAggregateGetBucket ( aht11, 138748, hash11, numLookups, &(payl)); apayl11 probepayl = aht11[bucket].payload; bucketFound = 1; bucketFound &= ((payl.att15_ccustkey == 
probepayl.att15_ccustkey)); bucketFound &= (stringEquals ( payl.att16_cname, probepayl.att16_cname)); bucketFound &= ((payl.att20_cacctbal == probepayl.att20_cacctbal)); bucketFound &= (stringEquals ( payl.att19_cphone, probepayl.att19_cphone)); bucketFound &= (stringEquals ( payl.att3_nname, probepayl.att3_nname)); bucketFound &= (stringEquals ( payl.att17_caddress, probepayl.att17_caddress)); bucketFound &= (stringEquals ( payl.att22_ccomment, probepayl.att22_ccomment)); } } if(active) { atomicAdd(&(agg1[bucket]), ((float)att39_rev)); } loopVar += step; } } __global__ void krnl_aggregation11( agg_ht<apayl11>* aht11, float* agg1, int* nout_result, int* oatt15_ccustkey, str_offs* oatt16_cname_offset, char* iatt16_cname_char, float* oatt20_cacctbal, str_offs* oatt19_cphone_offset, char* iatt19_cphone_char, str_offs* oatt3_nname_offset, char* iatt3_nname_char, str_offs* oatt17_caddress_offset, char* iatt17_caddress_char, str_offs* oatt22_ccomment_offset, char* iatt22_ccomment_char, float* oatt1_revenue) { int att15_ccustkey; str_t att16_cname; float att20_cacctbal; str_t att19_cphone; str_t att3_nname; str_t att17_caddress; str_t att22_ccomment; float att1_revenue; unsigned warplane = (threadIdx.x % 32); unsigned prefixlanes = (0xffffffff >> (32 - warplane)); int tid_aggregation11 = 0; unsigned loopVar = ((blockIdx.x * blockDim.x) + threadIdx.x); unsigned step = (blockDim.x * gridDim.x); unsigned flushPipeline = 0; int active = 0; while(!(flushPipeline)) { tid_aggregation11 = loopVar; active = (loopVar < 138748); // flush pipeline if no new elements flushPipeline = !(__ballot_sync(ALL_LANES,active)); if(active) { } // -------- scan aggregation ht (opId: 11) -------- if(active) { active &= ((aht11[tid_aggregation11].lock.lock == OnceLock::LOCK_DONE)); } if(active) { apayl11 payl = aht11[tid_aggregation11].payload; att15_ccustkey = payl.att15_ccustkey; att16_cname = payl.att16_cname; att20_cacctbal = payl.att20_cacctbal; att19_cphone = payl.att19_cphone; att3_nname = payl.att3_nname; att17_caddress = payl.att17_caddress; att22_ccomment = payl.att22_ccomment; } if(active) { att1_revenue = agg1[tid_aggregation11]; } // -------- materialize (opId: 12) -------- int wp; int writeMask; int numProj; writeMask = __ballot_sync(ALL_LANES,active); numProj = __popc(writeMask); if((warplane == 0)) { wp = atomicAdd(nout_result, numProj); } wp = __shfl_sync(ALL_LANES,wp,0); wp = (wp + __popc((writeMask & prefixlanes))); if(active) { oatt15_ccustkey[wp] = att15_ccustkey; oatt16_cname_offset[wp] = toStringOffset ( iatt16_cname_char, att16_cname); oatt20_cacctbal[wp] = att20_cacctbal; oatt19_cphone_offset[wp] = toStringOffset ( iatt19_cphone_char, att19_cphone); oatt3_nname_offset[wp] = toStringOffset ( iatt3_nname_char, att3_nname); oatt17_caddress_offset[wp] = toStringOffset ( iatt17_caddress_char, att17_caddress); oatt22_ccomment_offset[wp] = toStringOffset ( iatt22_ccomment_char, att22_ccomment); oatt1_revenue[wp] = att1_revenue; } loopVar += step; } } int main() { int* iatt2_nnationk; iatt2_nnationk = ( int*) map_memory_file ( "mmdb/nation_n_nationkey" ); size_t* iatt3_nname_offset; iatt3_nname_offset = ( size_t*) map_memory_file ( "mmdb/nation_n_name_offset" ); char* iatt3_nname_char; iatt3_nname_char = ( char*) map_memory_file ( "mmdb/nation_n_name_char" ); int* iatt6_oorderke; iatt6_oorderke = ( int*) map_memory_file ( "mmdb/orders_o_orderkey" ); int* iatt7_ocustkey; iatt7_ocustkey = ( int*) map_memory_file ( "mmdb/orders_o_custkey" ); unsigned* iatt10_oorderda; iatt10_oorderda = ( unsigned*) 
map_memory_file ( "mmdb/orders_o_orderdate" ); int* iatt15_ccustkey; iatt15_ccustkey = ( int*) map_memory_file ( "mmdb/customer_c_custkey" ); size_t* iatt16_cname_offset; iatt16_cname_offset = ( size_t*) map_memory_file ( "mmdb/customer_c_name_offset" ); char* iatt16_cname_char; iatt16_cname_char = ( char*) map_memory_file ( "mmdb/customer_c_name_char" ); size_t* iatt17_caddress_offset; iatt17_caddress_offset = ( size_t*) map_memory_file ( "mmdb/customer_c_address_offset" ); char* iatt17_caddress_char; iatt17_caddress_char = ( char*) map_memory_file ( "mmdb/customer_c_address_char" ); int* iatt18_cnationk; iatt18_cnationk = ( int*) map_memory_file ( "mmdb/customer_c_nationkey" ); size_t* iatt19_cphone_offset; iatt19_cphone_offset = ( size_t*) map_memory_file ( "mmdb/customer_c_phone_offset" ); char* iatt19_cphone_char; iatt19_cphone_char = ( char*) map_memory_file ( "mmdb/customer_c_phone_char" ); float* iatt20_cacctbal; iatt20_cacctbal = ( float*) map_memory_file ( "mmdb/customer_c_acctbal" ); size_t* iatt22_ccomment_offset; iatt22_ccomment_offset = ( size_t*) map_memory_file ( "mmdb/customer_c_comment_offset" ); char* iatt22_ccomment_char; iatt22_ccomment_char = ( char*) map_memory_file ( "mmdb/customer_c_comment_char" ); int* iatt23_lorderke; iatt23_lorderke = ( int*) map_memory_file ( "mmdb/lineitem_l_orderkey" ); float* iatt28_lextende; iatt28_lextende = ( float*) map_memory_file ( "mmdb/lineitem_l_extendedprice" ); float* iatt29_ldiscoun; iatt29_ldiscoun = ( float*) map_memory_file ( "mmdb/lineitem_l_discount" ); char* iatt31_lreturnf; iatt31_lreturnf = ( char*) map_memory_file ( "mmdb/lineitem_l_returnflag" ); int nout_result; std::vector < int > oatt15_ccustkey(69374); std::vector < str_offs > oatt16_cname_offset(69374); std::vector < float > oatt20_cacctbal(69374); std::vector < str_offs > oatt19_cphone_offset(69374); std::vector < str_offs > oatt3_nname_offset(69374); std::vector < str_offs > oatt17_caddress_offset(69374); std::vector < str_offs > oatt22_ccomment_offset(69374); std::vector < float > oatt1_revenue(69374); // wake up gpu cudaDeviceSynchronize(); { cudaError err = cudaGetLastError(); if(err != cudaSuccess) { std::cerr << "Cuda Error in wake up gpu! 
" << cudaGetErrorString( err ) << std::endl; ERROR("wake up gpu") } } int* d_iatt2_nnationk; cudaMalloc((void**) &d_iatt2_nnationk, 25* sizeof(int) ); size_t* d_iatt3_nname_offset; cudaMalloc((void**) &d_iatt3_nname_offset, (25 + 1)* sizeof(size_t) ); char* d_iatt3_nname_char; cudaMalloc((void**) &d_iatt3_nname_char, 186* sizeof(char) ); int* d_iatt6_oorderke; cudaMalloc((void**) &d_iatt6_oorderke, 1500000* sizeof(int) ); int* d_iatt7_ocustkey; cudaMalloc((void**) &d_iatt7_ocustkey, 1500000* sizeof(int) ); unsigned* d_iatt10_oorderda; cudaMalloc((void**) &d_iatt10_oorderda, 1500000* sizeof(unsigned) ); int* d_iatt15_ccustkey; cudaMalloc((void**) &d_iatt15_ccustkey, 150000* sizeof(int) ); size_t* d_iatt16_cname_offset; cudaMalloc((void**) &d_iatt16_cname_offset, (150000 + 1)* sizeof(size_t) ); char* d_iatt16_cname_char; cudaMalloc((void**) &d_iatt16_cname_char, 2700009* sizeof(char) ); size_t* d_iatt17_caddress_offset; cudaMalloc((void**) &d_iatt17_caddress_offset, (150000 + 1)* sizeof(size_t) ); char* d_iatt17_caddress_char; cudaMalloc((void**) &d_iatt17_caddress_char, 3753296* sizeof(char) ); int* d_iatt18_cnationk; cudaMalloc((void**) &d_iatt18_cnationk, 150000* sizeof(int) ); size_t* d_iatt19_cphone_offset; cudaMalloc((void**) &d_iatt19_cphone_offset, (150000 + 1)* sizeof(size_t) ); char* d_iatt19_cphone_char; cudaMalloc((void**) &d_iatt19_cphone_char, 2250009* sizeof(char) ); float* d_iatt20_cacctbal; cudaMalloc((void**) &d_iatt20_cacctbal, 150000* sizeof(float) ); size_t* d_iatt22_ccomment_offset; cudaMalloc((void**) &d_iatt22_ccomment_offset, (150000 + 1)* sizeof(size_t) ); char* d_iatt22_ccomment_char; cudaMalloc((void**) &d_iatt22_ccomment_char, 10836339* sizeof(char) ); int* d_iatt23_lorderke; cudaMalloc((void**) &d_iatt23_lorderke, 6001215* sizeof(int) ); float* d_iatt28_lextende; cudaMalloc((void**) &d_iatt28_lextende, 6001215* sizeof(float) ); float* d_iatt29_ldiscoun; cudaMalloc((void**) &d_iatt29_ldiscoun, 6001215* sizeof(float) ); char* d_iatt31_lreturnf; cudaMalloc((void**) &d_iatt31_lreturnf, 6001215* sizeof(char) ); int* d_nout_result; cudaMalloc((void**) &d_nout_result, 1* sizeof(int) ); int* d_oatt15_ccustkey; cudaMalloc((void**) &d_oatt15_ccustkey, 69374* sizeof(int) ); str_offs* d_oatt16_cname_offset; cudaMalloc((void**) &d_oatt16_cname_offset, 69374* sizeof(str_offs) ); float* d_oatt20_cacctbal; cudaMalloc((void**) &d_oatt20_cacctbal, 69374* sizeof(float) ); str_offs* d_oatt19_cphone_offset; cudaMalloc((void**) &d_oatt19_cphone_offset, 69374* sizeof(str_offs) ); str_offs* d_oatt3_nname_offset; cudaMalloc((void**) &d_oatt3_nname_offset, 69374* sizeof(str_offs) ); str_offs* d_oatt17_caddress_offset; cudaMalloc((void**) &d_oatt17_caddress_offset, 69374* sizeof(str_offs) ); str_offs* d_oatt22_ccomment_offset; cudaMalloc((void**) &d_oatt22_ccomment_offset, 69374* sizeof(str_offs) ); float* d_oatt1_revenue; cudaMalloc((void**) &d_oatt1_revenue, 69374* sizeof(float) ); cudaDeviceSynchronize(); { cudaError err = cudaGetLastError(); if(err != cudaSuccess) { std::cerr << "Cuda Error in cuda malloc! 
" << cudaGetErrorString( err ) << std::endl; ERROR("cuda malloc") } } // show memory usage of GPU { size_t free_byte ; size_t total_byte ; cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ; if ( cudaSuccess != cuda_status ) { printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) ); exit(1); } double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; fprintf(stderr, "Memory %.1f / %.1f GB\n", used_db/(1024*1024*1024), total_db/(1024*1024*1024) ); fflush(stdout); } unique_ht<jpayl6>* d_jht6; cudaMalloc((void**) &d_jht6, 50* sizeof(unique_ht<jpayl6>) ); { int gridsize=100; int blocksize=32; initUniqueHT<<<gridsize, blocksize>>>(d_jht6, 50); } multi_ht* d_jht5; cudaMalloc((void**) &d_jht5, 150000* sizeof(multi_ht) ); jpayl5* d_jht5_payload; cudaMalloc((void**) &d_jht5_payload, 150000* sizeof(jpayl5) ); { int gridsize=100; int blocksize=32; initMultiHT<<<gridsize, blocksize>>>(d_jht5, 150000); } int* d_offs5; cudaMalloc((void**) &d_offs5, 1* sizeof(int) ); { int gridsize=100; int blocksize=32; initArray<<<gridsize, blocksize>>>(d_offs5, 0, 1); } unique_ht<jpayl9>* d_jht9; cudaMalloc((void**) &d_jht9, 300000* sizeof(unique_ht<jpayl9>) ); { int gridsize=100; int blocksize=32; initUniqueHT<<<gridsize, blocksize>>>(d_jht9, 300000); } agg_ht<apayl11>* d_aht11; cudaMalloc((void**) &d_aht11, 138748* sizeof(agg_ht<apayl11>) ); { int gridsize=100; int blocksize=32; initAggHT<<<gridsize, blocksize>>>(d_aht11, 138748); } float* d_agg1; cudaMalloc((void**) &d_agg1, 138748* sizeof(float) ); { int gridsize=100; int blocksize=32; initArray<<<gridsize, blocksize>>>(d_agg1, 0.0f, 138748); } { int gridsize=100; int blocksize=32; initArray<<<gridsize, blocksize>>>(d_nout_result, 0, 1); } cudaDeviceSynchronize(); { cudaError err = cudaGetLastError(); if(err != cudaSuccess) { std::cerr << "Cuda Error in cuda mallocHT! 
" << cudaGetErrorString( err ) << std::endl; ERROR("cuda mallocHT") } } // show memory usage of GPU { size_t free_byte ; size_t total_byte ; cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ; if ( cudaSuccess != cuda_status ) { printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) ); exit(1); } double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; fprintf(stderr, "Memory %.1f / %.1f GB\n", used_db/(1024*1024*1024), total_db/(1024*1024*1024) ); fflush(stdout); } cudaMemcpy( d_iatt2_nnationk, iatt2_nnationk, 25 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt3_nname_offset, iatt3_nname_offset, (25 + 1) * sizeof(size_t), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt3_nname_char, iatt3_nname_char, 186 * sizeof(char), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt6_oorderke, iatt6_oorderke, 1500000 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt7_ocustkey, iatt7_ocustkey, 1500000 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt10_oorderda, iatt10_oorderda, 1500000 * sizeof(unsigned), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt15_ccustkey, iatt15_ccustkey, 150000 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt16_cname_offset, iatt16_cname_offset, (150000 + 1) * sizeof(size_t), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt16_cname_char, iatt16_cname_char, 2700009 * sizeof(char), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt17_caddress_offset, iatt17_caddress_offset, (150000 + 1) * sizeof(size_t), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt17_caddress_char, iatt17_caddress_char, 3753296 * sizeof(char), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt18_cnationk, iatt18_cnationk, 150000 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt19_cphone_offset, iatt19_cphone_offset, (150000 + 1) * sizeof(size_t), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt19_cphone_char, iatt19_cphone_char, 2250009 * sizeof(char), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt20_cacctbal, iatt20_cacctbal, 150000 * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt22_ccomment_offset, iatt22_ccomment_offset, (150000 + 1) * sizeof(size_t), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt22_ccomment_char, iatt22_ccomment_char, 10836339 * sizeof(char), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt23_lorderke, iatt23_lorderke, 6001215 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt28_lextende, iatt28_lextende, 6001215 * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt29_ldiscoun, iatt29_ldiscoun, 6001215 * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy( d_iatt31_lreturnf, iatt31_lreturnf, 6001215 * sizeof(char), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); { cudaError err = cudaGetLastError(); if(err != cudaSuccess) { std::cerr << "Cuda Error in cuda memcpy in! " << cudaGetErrorString( err ) << std::endl; ERROR("cuda memcpy in") } } std::clock_t start_totalKernelTime0 = std::clock(); std::clock_t start_krnl_nation11 = std::clock(); { int gridsize=100; int blocksize=32; krnl_nation1<<<gridsize, blocksize>>>(d_iatt2_nnationk, d_iatt3_nname_offset, d_iatt3_nname_char, d_jht6); } cudaDeviceSynchronize(); std::clock_t stop_krnl_nation11 = std::clock(); { cudaError err = cudaGetLastError(); if(err != cudaSuccess) { std::cerr << "Cuda Error in krnl_nation1! 
" << cudaGetErrorString( err ) << std::endl; ERROR("krnl_nation1") } } std::clock_t start_krnl_orders22 = std::clock(); { int gridsize=100; int blocksize=32; krnl_orders2<<<gridsize, blocksize>>>(d_iatt6_oorderke, d_iatt7_ocustkey, d_iatt10_oorderda, d_jht5, d_jht5_payload); } cudaDeviceSynchronize(); std::clock_t stop_krnl_orders22 = std::clock(); { cudaError err = cudaGetLastError(); if(err != cudaSuccess) { std::cerr << "Cuda Error in krnl_orders2! " << cudaGetErrorString( err ) << std::endl; ERROR("krnl_orders2") } } std::clock_t start_scanMultiHT3 = std::clock(); { int gridsize=100; int blocksize=32; scanMultiHT<<<gridsize, blocksize>>>(d_jht5, 150000, d_offs5); } cudaDeviceSynchronize(); std::clock_t stop_scanMultiHT3 = std::clock(); { cudaError err = cudaGetLastError(); if(err != cudaSuccess) { std::cerr << "Cuda Error in scanMultiHT! " << cudaGetErrorString( err ) << std::endl; ERROR("scanMultiHT") } } std::clock_t start_krnl_orders2_ins4 = std::clock(); { int gridsize=100; int blocksize=32; krnl_orders2_ins<<<gridsize, blocksize>>>(d_iatt6_oorderke, d_iatt7_ocustkey, d_iatt10_oorderda, d_jht5, d_jht5_payload, d_offs5); } cudaDeviceSynchronize(); std::clock_t stop_krnl_orders2_ins4 = std::clock(); { cudaError err = cudaGetLastError(); if(err != cudaSuccess) { std::cerr << "Cuda Error in krnl_orders2_ins! " << cudaGetErrorString( err ) << std::endl; ERROR("krnl_orders2_ins") } } std::clock_t start_krnl_customer45 = std::clock(); { int gridsize=100; int blocksize=32; krnl_customer4<<<gridsize, blocksize>>>(d_iatt15_ccustkey, d_iatt16_cname_offset, d_iatt16_cname_char, d_iatt17_caddress_offset, d_iatt17_caddress_char, d_iatt18_cnationk, d_iatt19_cphone_offset, d_iatt19_cphone_char, d_iatt20_cacctbal, d_iatt22_ccomment_offset, d_iatt22_ccomment_char, d_jht5, d_jht5_payload, d_jht6, d_jht9); } cudaDeviceSynchronize(); std::clock_t stop_krnl_customer45 = std::clock(); { cudaError err = cudaGetLastError(); if(err != cudaSuccess) { std::cerr << "Cuda Error in krnl_customer4! " << cudaGetErrorString( err ) << std::endl; ERROR("krnl_customer4") } } std::clock_t start_krnl_lineitem76 = std::clock(); { int gridsize=100; int blocksize=32; krnl_lineitem7<<<gridsize, blocksize>>>(d_iatt23_lorderke, d_iatt28_lextende, d_iatt29_ldiscoun, d_iatt31_lreturnf, d_jht9, d_aht11, d_agg1); } cudaDeviceSynchronize(); std::clock_t stop_krnl_lineitem76 = std::clock(); { cudaError err = cudaGetLastError(); if(err != cudaSuccess) { std::cerr << "Cuda Error in krnl_lineitem7! " << cudaGetErrorString( err ) << std::endl; ERROR("krnl_lineitem7") } } std::clock_t start_krnl_aggregation117 = std::clock(); { int gridsize=100; int blocksize=32; krnl_aggregation11<<<gridsize, blocksize>>>(d_aht11, d_agg1, d_nout_result, d_oatt15_ccustkey, d_oatt16_cname_offset, d_iatt16_cname_char, d_oatt20_cacctbal, d_oatt19_cphone_offset, d_iatt19_cphone_char, d_oatt3_nname_offset, d_iatt3_nname_char, d_oatt17_caddress_offset, d_iatt17_caddress_char, d_oatt22_ccomment_offset, d_iatt22_ccomment_char, d_oatt1_revenue); } cudaDeviceSynchronize(); std::clock_t stop_krnl_aggregation117 = std::clock(); { cudaError err = cudaGetLastError(); if(err != cudaSuccess) { std::cerr << "Cuda Error in krnl_aggregation11! 
" << cudaGetErrorString( err ) << std::endl; ERROR("krnl_aggregation11") } } std::clock_t stop_totalKernelTime0 = std::clock(); cudaMemcpy( &nout_result, d_nout_result, 1 * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy( oatt15_ccustkey.data(), d_oatt15_ccustkey, 69374 * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy( oatt16_cname_offset.data(), d_oatt16_cname_offset, 69374 * sizeof(str_offs), cudaMemcpyDeviceToHost); cudaMemcpy( oatt20_cacctbal.data(), d_oatt20_cacctbal, 69374 * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy( oatt19_cphone_offset.data(), d_oatt19_cphone_offset, 69374 * sizeof(str_offs), cudaMemcpyDeviceToHost); cudaMemcpy( oatt3_nname_offset.data(), d_oatt3_nname_offset, 69374 * sizeof(str_offs), cudaMemcpyDeviceToHost); cudaMemcpy( oatt17_caddress_offset.data(), d_oatt17_caddress_offset, 69374 * sizeof(str_offs), cudaMemcpyDeviceToHost); cudaMemcpy( oatt22_ccomment_offset.data(), d_oatt22_ccomment_offset, 69374 * sizeof(str_offs), cudaMemcpyDeviceToHost); cudaMemcpy( oatt1_revenue.data(), d_oatt1_revenue, 69374 * sizeof(float), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); { cudaError err = cudaGetLastError(); if(err != cudaSuccess) { std::cerr << "Cuda Error in cuda memcpy out! " << cudaGetErrorString( err ) << std::endl; ERROR("cuda memcpy out") } } cudaFree( d_iatt2_nnationk); cudaFree( d_iatt3_nname_offset); cudaFree( d_iatt3_nname_char); cudaFree( d_jht6); cudaFree( d_iatt6_oorderke); cudaFree( d_iatt7_ocustkey); cudaFree( d_iatt10_oorderda); cudaFree( d_jht5); cudaFree( d_jht5_payload); cudaFree( d_offs5); cudaFree( d_iatt15_ccustkey); cudaFree( d_iatt16_cname_offset); cudaFree( d_iatt16_cname_char); cudaFree( d_iatt17_caddress_offset); cudaFree( d_iatt17_caddress_char); cudaFree( d_iatt18_cnationk); cudaFree( d_iatt19_cphone_offset); cudaFree( d_iatt19_cphone_char); cudaFree( d_iatt20_cacctbal); cudaFree( d_iatt22_ccomment_offset); cudaFree( d_iatt22_ccomment_char); cudaFree( d_jht9); cudaFree( d_iatt23_lorderke); cudaFree( d_iatt28_lextende); cudaFree( d_iatt29_ldiscoun); cudaFree( d_iatt31_lreturnf); cudaFree( d_aht11); cudaFree( d_agg1); cudaFree( d_nout_result); cudaFree( d_oatt15_ccustkey); cudaFree( d_oatt16_cname_offset); cudaFree( d_oatt20_cacctbal); cudaFree( d_oatt19_cphone_offset); cudaFree( d_oatt3_nname_offset); cudaFree( d_oatt17_caddress_offset); cudaFree( d_oatt22_ccomment_offset); cudaFree( d_oatt1_revenue); cudaDeviceSynchronize(); { cudaError err = cudaGetLastError(); if(err != cudaSuccess) { std::cerr << "Cuda Error in cuda free! " << cudaGetErrorString( err ) << std::endl; ERROR("cuda free") } } std::clock_t start_finish8 = std::clock(); printf("\nResult: %i tuples\n", nout_result); if((nout_result > 69374)) { ERROR("Index out of range. 
Output size larger than allocated with expected result number.") } for ( int pv = 0; ((pv < 10) && (pv < nout_result)); pv += 1) { printf("c_custkey: "); printf("%8i", oatt15_ccustkey[pv]); printf(" "); printf("c_name: "); stringPrint ( iatt16_cname_char, oatt16_cname_offset[pv]); printf(" "); printf("c_acctbal: "); printf("%15.2f", oatt20_cacctbal[pv]); printf(" "); printf("c_phone: "); stringPrint ( iatt19_cphone_char, oatt19_cphone_offset[pv]); printf(" "); printf("n_name: "); stringPrint ( iatt3_nname_char, oatt3_nname_offset[pv]); printf(" "); printf("c_address: "); stringPrint ( iatt17_caddress_char, oatt17_caddress_offset[pv]); printf(" "); printf("c_comment: "); stringPrint ( iatt22_ccomment_char, oatt22_ccomment_offset[pv]); printf(" "); printf("revenue: "); printf("%15.2f", oatt1_revenue[pv]); printf(" "); printf("\n"); } if((nout_result > 10)) { printf("[...]\n"); } printf("\n"); std::clock_t stop_finish8 = std::clock(); printf("<timing>\n"); printf ( "%32s: %6.1f ms\n", "krnl_nation1", (stop_krnl_nation11 - start_krnl_nation11) / (double) (CLOCKS_PER_SEC / 1000) ); printf ( "%32s: %6.1f ms\n", "krnl_orders2", (stop_krnl_orders22 - start_krnl_orders22) / (double) (CLOCKS_PER_SEC / 1000) ); printf ( "%32s: %6.1f ms\n", "scanMultiHT", (stop_scanMultiHT3 - start_scanMultiHT3) / (double) (CLOCKS_PER_SEC / 1000) ); printf ( "%32s: %6.1f ms\n", "krnl_orders2_ins", (stop_krnl_orders2_ins4 - start_krnl_orders2_ins4) / (double) (CLOCKS_PER_SEC / 1000) ); printf ( "%32s: %6.1f ms\n", "krnl_customer4", (stop_krnl_customer45 - start_krnl_customer45) / (double) (CLOCKS_PER_SEC / 1000) ); printf ( "%32s: %6.1f ms\n", "krnl_lineitem7", (stop_krnl_lineitem76 - start_krnl_lineitem76) / (double) (CLOCKS_PER_SEC / 1000) ); printf ( "%32s: %6.1f ms\n", "krnl_aggregation11", (stop_krnl_aggregation117 - start_krnl_aggregation117) / (double) (CLOCKS_PER_SEC / 1000) ); printf ( "%32s: %6.1f ms\n", "finish", (stop_finish8 - start_finish8) / (double) (CLOCKS_PER_SEC / 1000) ); printf ( "%32s: %6.1f ms\n", "totalKernelTime", (stop_totalKernelTime0 - start_totalKernelTime0) / (double) (CLOCKS_PER_SEC / 1000) ); printf("</timing>\n"); }
1c0d895bea8d87b353d5379aa1fecac2142a0ee6.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2017-2018 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <thrust/iterator/zip_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/transform.h> #include <cstring> #include <algorithm> #include <exception> #include "query/algorithm.hpp" #include "query/iterator.hpp" #include "query/time_series_aggregate.h" CGoCallResHandle Sort(DimensionColumnVector keys, uint8_t *values, int valueBytes, int length, void *cudaStream, int device) { CGoCallResHandle resHandle = {nullptr, nullptr}; try { #ifdef RUN_ON_DEVICE hipSetDevice(device); #endif ares::sort(keys, values, valueBytes, length, cudaStream); CheckCUDAError("Sort"); } catch (std::exception &e) { std::cerr << "Exception happend when doing Sort:" << e.what() << std::endl; resHandle.pStrErr = strdup(e.what()); } return resHandle; } CGoCallResHandle Reduce(DimensionColumnVector inputKeys, uint8_t *inputValues, DimensionColumnVector outputKeys, uint8_t *outputValues, int valueBytes, int length, AggregateFunction aggFunc, void *cudaStream, int device) { CGoCallResHandle resHandle = {nullptr, nullptr}; try { #ifdef RUN_ON_DEVICE hipSetDevice(device); #endif resHandle.res = reinterpret_cast<void *>(ares::reduce(inputKeys, inputValues, outputKeys, outputValues, valueBytes, length, aggFunc, cudaStream)); CheckCUDAError("Reduce"); return resHandle; } catch (std::exception &e) { std::cerr << "Exception happend when doing Reduce:" << e.what() << std::endl; resHandle.pStrErr = strdup(e.what()); } return resHandle; } namespace ares { template<typename ValueType> void sortInternal(DimensionColumnVector vector, ValueType values, int length, void *cudaStream) { DimensionHashIterator hashIter(vector.DimValues, vector.IndexVector, vector.NumDimsPerDimWidth, vector.VectorCapacity); #ifdef RUN_ON_DEVICE thrust::copy(thrust::hip::par.on(reinterpret_cast<hipStream_t>(cudaStream)), hashIter, hashIter + length, vector.HashValues); thrust::stable_sort_by_key( thrust::hip::par.on(reinterpret_cast<hipStream_t>(cudaStream)), vector.HashValues, vector.HashValues + length, vector.IndexVector); #else thrust::copy(thrust::host, hashIter, hashIter + length, vector.HashValues); thrust::stable_sort_by_key(thrust::host, vector.HashValues, vector.HashValues + length, vector.IndexVector); #endif } // sort based on DimensionColumnVector void sort(DimensionColumnVector keys, uint8_t *values, int valueBytes, int length, void *cudaStream) { switch (valueBytes) { case 4: sortInternal<uint32_t *>(keys, reinterpret_cast<uint32_t *>(values), length, cudaStream); break; case 8: sortInternal<uint64_t *>(keys, reinterpret_cast<uint64_t *>(values), length, cudaStream); break; default:throw std::invalid_argument("ValueBytes is invalid"); } } template<typename Value, typename AggFunc> int reduceInternal(uint64_t *inputHashValues, uint32_t *inputIndexVector, uint8_t *inputValues, uint64_t *outputHashValues, uint32_t *outputIndexVector, uint8_t *outputValues, int length, 
void *cudaStream) { thrust::equal_to<uint64_t> binaryPred; AggFunc aggFunc; ReduceByHashFunctor<AggFunc> reduceFunc(aggFunc); auto zippedInputIter = thrust::make_zip_iterator(thrust::make_tuple( inputIndexVector, thrust::make_permutation_iterator(reinterpret_cast<Value *>(inputValues), inputIndexVector))); auto zippedOutputIter = thrust::make_zip_iterator(thrust::make_tuple( outputIndexVector, reinterpret_cast<Value *>(outputValues))); #ifdef RUN_ON_DEVICE auto resEnd = thrust::reduce_by_key( thrust::hip::par.on(reinterpret_cast<hipStream_t>(cudaStream)), inputHashValues, inputHashValues + length, zippedInputIter, thrust::make_discard_iterator(), zippedOutputIter, binaryPred, reduceFunc); return thrust::get<1>(resEnd) - zippedOutputIter; #else auto resEnd = thrust::reduce_by_key(thrust::host, inputHashValues, inputHashValues + length, zippedInputIter, thrust::make_discard_iterator(), zippedOutputIter, binaryPred, reduceFunc); return thrust::get<1>(resEnd) - zippedOutputIter; #endif } struct rolling_avg { typedef uint64_t first_argument_type; typedef uint64_t second_argument_type; typedef uint64_t result_type; __host__ __device__ uint64_t operator()( uint64_t lhs, uint64_t rhs) const { uint32_t lCount = lhs >> 32; uint32_t rCount = rhs >> 32; uint32_t totalCount = lCount + rCount; if (totalCount == 0) { return 0; } uint64_t res = 0; *(reinterpret_cast<uint32_t *>(&res) + 1) = totalCount; // do division first to avoid overflow. *reinterpret_cast<float_t*>(&res) = *reinterpret_cast<float_t*>(&lhs) / totalCount * lCount + *reinterpret_cast<float_t*>(&rhs) / totalCount * rCount; return res; } }; int bindValueAndAggFunc(uint64_t *inputHashValues, uint32_t *inputIndexVector, uint8_t *inputValues, uint64_t *outputHashValues, uint32_t *outputIndexVector, uint8_t *outputValues, int valueBytes, int length, AggregateFunction aggFunc, void *cudaStream) { switch (aggFunc) { case AGGR_SUM_UNSIGNED: if (valueBytes == 4) { return reduceInternal<uint32_t, thrust::plus<uint32_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); } else { return reduceInternal<uint64_t, thrust::plus<uint64_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); } case AGGR_SUM_SIGNED: if (valueBytes == 4) { return reduceInternal<int32_t, thrust::plus<int32_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); } else { return reduceInternal<int64_t, thrust::plus<int64_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); } case AGGR_SUM_FLOAT: if (valueBytes == 4) { return reduceInternal<float_t, thrust::plus<float_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); } else { return reduceInternal<double_t, thrust::plus<double_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); } case AGGR_MIN_UNSIGNED: return reduceInternal<uint32_t, thrust::minimum<uint32_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); case AGGR_MIN_SIGNED: return reduceInternal<int32_t, thrust::minimum<int32_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); case AGGR_MIN_FLOAT: 
return reduceInternal<float_t, thrust::minimum<float_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); case AGGR_MAX_UNSIGNED: return reduceInternal<uint32_t, thrust::maximum<uint32_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); case AGGR_MAX_SIGNED: return reduceInternal<int32_t, thrust::maximum<int32_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); case AGGR_MAX_FLOAT: return reduceInternal<float_t, thrust::maximum<float_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); case AGGR_AVG_FLOAT: return reduceInternal<uint64_t, rolling_avg > ( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); default: throw std::invalid_argument("Unsupported aggregation function type"); } } int reduce(DimensionColumnVector inputKeys, uint8_t *inputValues, DimensionColumnVector outputKeys, uint8_t *outputValues, int valueBytes, int length, AggregateFunction aggFunc, void *cudaStream) { int outputLength = bindValueAndAggFunc( inputKeys.HashValues, inputKeys.IndexVector, inputValues, outputKeys.HashValues, outputKeys.IndexVector, outputValues, valueBytes, length, aggFunc, cudaStream); DimensionColumnPermutateIterator iterIn( inputKeys.DimValues, outputKeys.IndexVector, inputKeys.VectorCapacity, outputLength, inputKeys.NumDimsPerDimWidth); DimensionColumnOutputIterator iterOut(outputKeys.DimValues, inputKeys.VectorCapacity, outputLength, inputKeys.NumDimsPerDimWidth); int numDims = 0; for (int i = 0; i < NUM_DIM_WIDTH; i++) { numDims += inputKeys.NumDimsPerDimWidth[i]; } // copy dim values into output #ifdef RUN_ON_DEVICE thrust::copy(thrust::hip::par.on(reinterpret_cast<hipStream_t>(cudaStream)), iterIn, iterIn + numDims * 2 * outputLength, iterOut); #else thrust::copy(thrust::host, iterIn, iterIn + numDims * 2 * outputLength, iterOut); #endif return outputLength; } } // namespace ares
1c0d895bea8d87b353d5379aa1fecac2142a0ee6.cu
// Copyright (c) 2017-2018 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <thrust/iterator/zip_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/transform.h> #include <cstring> #include <algorithm> #include <exception> #include "query/algorithm.hpp" #include "query/iterator.hpp" #include "query/time_series_aggregate.h" CGoCallResHandle Sort(DimensionColumnVector keys, uint8_t *values, int valueBytes, int length, void *cudaStream, int device) { CGoCallResHandle resHandle = {nullptr, nullptr}; try { #ifdef RUN_ON_DEVICE cudaSetDevice(device); #endif ares::sort(keys, values, valueBytes, length, cudaStream); CheckCUDAError("Sort"); } catch (std::exception &e) { std::cerr << "Exception happend when doing Sort:" << e.what() << std::endl; resHandle.pStrErr = strdup(e.what()); } return resHandle; } CGoCallResHandle Reduce(DimensionColumnVector inputKeys, uint8_t *inputValues, DimensionColumnVector outputKeys, uint8_t *outputValues, int valueBytes, int length, AggregateFunction aggFunc, void *cudaStream, int device) { CGoCallResHandle resHandle = {nullptr, nullptr}; try { #ifdef RUN_ON_DEVICE cudaSetDevice(device); #endif resHandle.res = reinterpret_cast<void *>(ares::reduce(inputKeys, inputValues, outputKeys, outputValues, valueBytes, length, aggFunc, cudaStream)); CheckCUDAError("Reduce"); return resHandle; } catch (std::exception &e) { std::cerr << "Exception happend when doing Reduce:" << e.what() << std::endl; resHandle.pStrErr = strdup(e.what()); } return resHandle; } namespace ares { template<typename ValueType> void sortInternal(DimensionColumnVector vector, ValueType values, int length, void *cudaStream) { DimensionHashIterator hashIter(vector.DimValues, vector.IndexVector, vector.NumDimsPerDimWidth, vector.VectorCapacity); #ifdef RUN_ON_DEVICE thrust::copy(thrust::cuda::par.on(reinterpret_cast<cudaStream_t>(cudaStream)), hashIter, hashIter + length, vector.HashValues); thrust::stable_sort_by_key( thrust::cuda::par.on(reinterpret_cast<cudaStream_t>(cudaStream)), vector.HashValues, vector.HashValues + length, vector.IndexVector); #else thrust::copy(thrust::host, hashIter, hashIter + length, vector.HashValues); thrust::stable_sort_by_key(thrust::host, vector.HashValues, vector.HashValues + length, vector.IndexVector); #endif } // sort based on DimensionColumnVector void sort(DimensionColumnVector keys, uint8_t *values, int valueBytes, int length, void *cudaStream) { switch (valueBytes) { case 4: sortInternal<uint32_t *>(keys, reinterpret_cast<uint32_t *>(values), length, cudaStream); break; case 8: sortInternal<uint64_t *>(keys, reinterpret_cast<uint64_t *>(values), length, cudaStream); break; default:throw std::invalid_argument("ValueBytes is invalid"); } } template<typename Value, typename AggFunc> int reduceInternal(uint64_t *inputHashValues, uint32_t *inputIndexVector, uint8_t *inputValues, uint64_t *outputHashValues, uint32_t *outputIndexVector, uint8_t *outputValues, int length, void *cudaStream) { thrust::equal_to<uint64_t> 
binaryPred; AggFunc aggFunc; ReduceByHashFunctor<AggFunc> reduceFunc(aggFunc); auto zippedInputIter = thrust::make_zip_iterator(thrust::make_tuple( inputIndexVector, thrust::make_permutation_iterator(reinterpret_cast<Value *>(inputValues), inputIndexVector))); auto zippedOutputIter = thrust::make_zip_iterator(thrust::make_tuple( outputIndexVector, reinterpret_cast<Value *>(outputValues))); #ifdef RUN_ON_DEVICE auto resEnd = thrust::reduce_by_key( thrust::cuda::par.on(reinterpret_cast<cudaStream_t>(cudaStream)), inputHashValues, inputHashValues + length, zippedInputIter, thrust::make_discard_iterator(), zippedOutputIter, binaryPred, reduceFunc); return thrust::get<1>(resEnd) - zippedOutputIter; #else auto resEnd = thrust::reduce_by_key(thrust::host, inputHashValues, inputHashValues + length, zippedInputIter, thrust::make_discard_iterator(), zippedOutputIter, binaryPred, reduceFunc); return thrust::get<1>(resEnd) - zippedOutputIter; #endif } struct rolling_avg { typedef uint64_t first_argument_type; typedef uint64_t second_argument_type; typedef uint64_t result_type; __host__ __device__ uint64_t operator()( uint64_t lhs, uint64_t rhs) const { uint32_t lCount = lhs >> 32; uint32_t rCount = rhs >> 32; uint32_t totalCount = lCount + rCount; if (totalCount == 0) { return 0; } uint64_t res = 0; *(reinterpret_cast<uint32_t *>(&res) + 1) = totalCount; // do division first to avoid overflow. *reinterpret_cast<float_t*>(&res) = *reinterpret_cast<float_t*>(&lhs) / totalCount * lCount + *reinterpret_cast<float_t*>(&rhs) / totalCount * rCount; return res; } }; int bindValueAndAggFunc(uint64_t *inputHashValues, uint32_t *inputIndexVector, uint8_t *inputValues, uint64_t *outputHashValues, uint32_t *outputIndexVector, uint8_t *outputValues, int valueBytes, int length, AggregateFunction aggFunc, void *cudaStream) { switch (aggFunc) { case AGGR_SUM_UNSIGNED: if (valueBytes == 4) { return reduceInternal<uint32_t, thrust::plus<uint32_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); } else { return reduceInternal<uint64_t, thrust::plus<uint64_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); } case AGGR_SUM_SIGNED: if (valueBytes == 4) { return reduceInternal<int32_t, thrust::plus<int32_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); } else { return reduceInternal<int64_t, thrust::plus<int64_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); } case AGGR_SUM_FLOAT: if (valueBytes == 4) { return reduceInternal<float_t, thrust::plus<float_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); } else { return reduceInternal<double_t, thrust::plus<double_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); } case AGGR_MIN_UNSIGNED: return reduceInternal<uint32_t, thrust::minimum<uint32_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); case AGGR_MIN_SIGNED: return reduceInternal<int32_t, thrust::minimum<int32_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); case AGGR_MIN_FLOAT: return reduceInternal<float_t, 
thrust::minimum<float_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); case AGGR_MAX_UNSIGNED: return reduceInternal<uint32_t, thrust::maximum<uint32_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); case AGGR_MAX_SIGNED: return reduceInternal<int32_t, thrust::maximum<int32_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); case AGGR_MAX_FLOAT: return reduceInternal<float_t, thrust::maximum<float_t> >( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); case AGGR_AVG_FLOAT: return reduceInternal<uint64_t, rolling_avg > ( inputHashValues, inputIndexVector, inputValues, outputHashValues, outputIndexVector, outputValues, length, cudaStream); default: throw std::invalid_argument("Unsupported aggregation function type"); } } int reduce(DimensionColumnVector inputKeys, uint8_t *inputValues, DimensionColumnVector outputKeys, uint8_t *outputValues, int valueBytes, int length, AggregateFunction aggFunc, void *cudaStream) { int outputLength = bindValueAndAggFunc( inputKeys.HashValues, inputKeys.IndexVector, inputValues, outputKeys.HashValues, outputKeys.IndexVector, outputValues, valueBytes, length, aggFunc, cudaStream); DimensionColumnPermutateIterator iterIn( inputKeys.DimValues, outputKeys.IndexVector, inputKeys.VectorCapacity, outputLength, inputKeys.NumDimsPerDimWidth); DimensionColumnOutputIterator iterOut(outputKeys.DimValues, inputKeys.VectorCapacity, outputLength, inputKeys.NumDimsPerDimWidth); int numDims = 0; for (int i = 0; i < NUM_DIM_WIDTH; i++) { numDims += inputKeys.NumDimsPerDimWidth[i]; } // copy dim values into output #ifdef RUN_ON_DEVICE thrust::copy(thrust::cuda::par.on(reinterpret_cast<cudaStream_t>(cudaStream)), iterIn, iterIn + numDims * 2 * outputLength, iterOut); #else thrust::copy(thrust::host, iterIn, iterIn + numDims * 2 * outputLength, iterOut); #endif return outputLength; } } // namespace ares
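Note: the rolling_avg functor above packs each partial aggregate into one 64-bit word: the row count in the upper 32 bits and the running average, stored as a float bit pattern, in the lower 32 bits. The standalone host sketch below only illustrates that layout; packAvg and unpackAvg are hypothetical helpers that do not exist in the AresDB source, and the byte layout assumes a little-endian host, matching the reinterpret_cast usage in the device code.

#include <cstdint>
#include <cstring>
#include <cstdio>

// Hypothetical helpers (not part of the AresDB source) showing the layout
// consumed by rolling_avg: upper 32 bits = count, lower 32 bits = float average.
// Assumes a little-endian host, like the device-side reinterpret_cast code.
static uint64_t packAvg(float avg, uint32_t count) {
    uint64_t packed = 0;
    std::memcpy(&packed, &avg, sizeof(float));        // low 32 bits: average bits
    packed |= static_cast<uint64_t>(count) << 32;     // high 32 bits: row count
    return packed;
}

static void unpackAvg(uint64_t packed, float *avg, uint32_t *count) {
    uint32_t lo = static_cast<uint32_t>(packed & 0xffffffffu);
    std::memcpy(avg, &lo, sizeof(float));
    *count = static_cast<uint32_t>(packed >> 32);
}

int main() {
    // Two partial aggregates: average 2.0 over 3 rows and average 4.0 over 1 row.
    // rolling_avg would combine them into average 2.5 over 4 rows.
    uint64_t lhs = packAvg(2.0f, 3);
    uint64_t rhs = packAvg(4.0f, 1);
    float avg; uint32_t count;
    unpackAvg(lhs, &avg, &count);
    std::printf("lhs: avg=%f count=%u\n", avg, count);
    unpackAvg(rhs, &avg, &count);
    std::printf("rhs: avg=%f count=%u\n", avg, count);
    return 0;
}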
0a338f4a963d2c9916e96a14670e73aa3223e5c8.hip
// !!! This is a file automatically generated by hipify!!!
/*!
 * Copyright (c) 2015 by Contributors
 * \file lrn.cu
 * \brief
 * \author Bing Xu
*/

#include "./lrn-inl.h"
#if MXNET_USE_CUDNN == 1
#include "./cudnn_lrn-inl.h"
#endif

namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(LRNParam param) {
#if MXNET_USE_CUDNN == 1
  return new CuDNNLocalResponseNormOp(param);
#else
#if TORCH_HIP_VERSION == 7000
  LOG(FATAL) << "Due to old CUDA compiler bug, LRN is disabled."
             << "Please upgrade CUDA to 7.5+ or use CUDNN";
  return NULL;
#else
  return new LocalResponseNormOp<gpu>(param);
#endif  // TORCH_HIP_VERSION
#endif  // MXNET_USE_CUDNN
}
}  // namespace op
}  // namespace mxnet
0a338f4a963d2c9916e96a14670e73aa3223e5c8.cu
/*!
 * Copyright (c) 2015 by Contributors
 * \file lrn.cu
 * \brief
 * \author Bing Xu
*/

#include "./lrn-inl.h"
#if MXNET_USE_CUDNN == 1
#include "./cudnn_lrn-inl.h"
#endif

namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(LRNParam param) {
#if MXNET_USE_CUDNN == 1
  return new CuDNNLocalResponseNormOp(param);
#else
#if CUDA_VERSION == 7000
  LOG(FATAL) << "Due to old CUDA compiler bug, LRN is disabled."
             << "Please upgrade CUDA to 7.5+ or use CUDNN";
  return NULL;
#else
  return new LocalResponseNormOp<gpu>(param);
#endif  // CUDA_VERSION
#endif  // MXNET_USE_CUDNN
}
}  // namespace op
}  // namespace mxnet
6e71a0be52244d16c363ce5050988849e3283f89.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_2_top; int xdim0_update_halo_kernel2_zvel_plus_2_top_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_2_top; int ydim0_update_halo_kernel2_zvel_plus_2_top_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_2_top; int xdim1_update_halo_kernel2_zvel_plus_2_top_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_2_top; int ydim1_update_halo_kernel2_zvel_plus_2_top_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_zvel_plus_2_top * (y) + \ xdim0_update_halo_kernel2_zvel_plus_2_top * \ ydim0_update_halo_kernel2_zvel_plus_2_top * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_zvel_plus_2_top * (y) + \ xdim1_update_halo_kernel2_zvel_plus_2_top * \ ydim1_update_halo_kernel2_zvel_plus_2_top * (z)) // user function __device__ inline void update_halo_kernel2_zvel_plus_2_top_gpu(double *zvel0, double *zvel1, const int *fields) { if (fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0, 0, 0)] = zvel0[OPS_ACC0(0, -2, 0)]; if (fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0, 0, 0)] = zvel1[OPS_ACC1(0, -2, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_2_top( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_2_top + idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_2_top * ydim0_update_halo_kernel2_zvel_plus_2_top; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_2_top + idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_2_top * ydim1_update_halo_kernel2_zvel_plus_2_top; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_2_top_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel2_zvel_plus_2_top(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel2_zvel_plus_2_top_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 3, range, 51)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(51, "update_halo_kernel2_zvel_plus_2_top"); OPS_kernels[51].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] 
+= (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_2_top_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_2_top_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_2_top_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_2_top_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_plus_2_top, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_zvel_plus_2_top_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_plus_2_top, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_zvel_plus_2_top_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_plus_2_top, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_zvel_plus_2_top_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_plus_2_top, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_zvel_plus_2_top_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? 
args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[51].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_2_top), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[51].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[51].mpi_time += t2 - t1; OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel2_zvel_plus_2_top(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 51; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 51; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int)); memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_2_top_execute; if (OPS_diags > 1) { ops_timing_realloc(51, "update_halo_kernel2_zvel_plus_2_top"); } ops_enqueue_kernel(desc); } #endif
6e71a0be52244d16c363ce5050988849e3283f89.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_2_top; int xdim0_update_halo_kernel2_zvel_plus_2_top_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_2_top; int ydim0_update_halo_kernel2_zvel_plus_2_top_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_2_top; int xdim1_update_halo_kernel2_zvel_plus_2_top_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_2_top; int ydim1_update_halo_kernel2_zvel_plus_2_top_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_zvel_plus_2_top * (y) + \ xdim0_update_halo_kernel2_zvel_plus_2_top * \ ydim0_update_halo_kernel2_zvel_plus_2_top * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_zvel_plus_2_top * (y) + \ xdim1_update_halo_kernel2_zvel_plus_2_top * \ ydim1_update_halo_kernel2_zvel_plus_2_top * (z)) // user function __device__ inline void update_halo_kernel2_zvel_plus_2_top_gpu(double *zvel0, double *zvel1, const int *fields) { if (fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0, 0, 0)] = zvel0[OPS_ACC0(0, -2, 0)]; if (fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0, 0, 0)] = zvel1[OPS_ACC1(0, -2, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_2_top( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_2_top + idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_2_top * ydim0_update_halo_kernel2_zvel_plus_2_top; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_2_top + idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_2_top * ydim1_update_halo_kernel2_zvel_plus_2_top; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_2_top_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel2_zvel_plus_2_top(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel2_zvel_plus_2_top_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 3, range, 51)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(51, "update_halo_kernel2_zvel_plus_2_top"); OPS_kernels[51].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; 
n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_2_top_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_2_top_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_2_top_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_2_top_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_plus_2_top, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_zvel_plus_2_top_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_plus_2_top, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_zvel_plus_2_top_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_plus_2_top, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_zvel_plus_2_top_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_plus_2_top, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_zvel_plus_2_top_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[51].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_zvel_plus_2_top<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[51].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[51].mpi_time += t2 - t1; OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel2_zvel_plus_2_top(char const *name, ops_block block, int dim, 
int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 51; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 51; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int)); memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_2_top_execute; if (OPS_diags > 1) { ops_timing_realloc(51, "update_halo_kernel2_zvel_plus_2_top"); } ops_enqueue_kernel(desc); } #endif
51a668c77e97648e824ea169be7cf7fa51431d17.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

#define N 10000000   //input data size: 10,000,000
#define BLOCKSIZE 1024

/* prefix sum */
using namespace std;

__global__ void add(double* in, double* out, int offset, int n){
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if(gid >= n) return ;
    out[gid] = in[gid];
    if(gid >= offset)
        out[gid] += in[gid-offset];
}
51a668c77e97648e824ea169be7cf7fa51431d17.cu
#include "includes.h" #define N 10000000 //input data size: 10,000,000 #define BLOCKSIZE 1024 /* prefix sum */ using namespace std; __global__ void add(double* in, double* out, int offset, int n){ int gid = threadIdx.x + blockIdx.x * blockDim.x; if(gid >= n) return ; out[gid] = in[gid]; if(gid >= offset) out[gid] += in[gid-offset]; }
11f31835596457a98398725dec6a57adcd388105.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <math.h> #include "bartlett_window_impl.cuh" template <typename S> __global__ void BartlettWindowOne(const size_t size, S *output) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { output[pos] = static_cast<S>(1); } return; } template <typename S> __global__ void BartlettWindow(const size_t size, const double N, const double M, S *output) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { double out = 0; if (pos <= M) { out = (2 * pos) / (N - 1); } else { out = 2 - (2 * pos) / (N - 1); } output[pos] = static_cast<S>(out); } return; } template <typename T, typename S> hipError_t CalBartlettWindow(const size_t size, const T *input, const bool periodic, S *output, const uint32_t &device_id, hipStream_t cuda_stream) { T N = 0; hipMemcpy(&N, &input[0], sizeof(T), hipMemcpyDeviceToHost); if (N == 1) { hipLaunchKernelGGL(( BartlettWindowOne), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size, output); } else { N = periodic ? static_cast<double>(N + 1) : static_cast<double>(N); double M = (N - 1) / 2; hipLaunchKernelGGL(( BartlettWindow), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size, N, M, output); } CHECK_CUDA_LAUNCH_SUCCESS(); } template CUDA_LIB_EXPORT hipError_t CalBartlettWindow<int, half>(const size_t size, const int *input, const bool periodic, half *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalBartlettWindow<int64_t, half>(const size_t size, const int64_t *input, const bool periodic, half *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalBartlettWindow<int, float>(const size_t size, const int *input, const bool periodic, float *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalBartlettWindow<int64_t, float>(const size_t size, const int64_t *input, const bool periodic, float *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalBartlettWindow<int, double>(const size_t size, const int *input, const bool periodic, double *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalBartlettWindow<int64_t, double>(const size_t size, const int64_t *input, const bool periodic, double *output, const uint32_t &device_id, hipStream_t cuda_stream);
11f31835596457a98398725dec6a57adcd388105.cu
/** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <math.h> #include "bartlett_window_impl.cuh" template <typename S> __global__ void BartlettWindowOne(const size_t size, S *output) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { output[pos] = static_cast<S>(1); } return; } template <typename S> __global__ void BartlettWindow(const size_t size, const double N, const double M, S *output) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { double out = 0; if (pos <= M) { out = (2 * pos) / (N - 1); } else { out = 2 - (2 * pos) / (N - 1); } output[pos] = static_cast<S>(out); } return; } template <typename T, typename S> cudaError_t CalBartlettWindow(const size_t size, const T *input, const bool periodic, S *output, const uint32_t &device_id, cudaStream_t cuda_stream) { T N = 0; cudaMemcpy(&N, &input[0], sizeof(T), cudaMemcpyDeviceToHost); if (N == 1) { BartlettWindowOne<<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0, cuda_stream>>>(size, output); } else { N = periodic ? static_cast<double>(N + 1) : static_cast<double>(N); double M = (N - 1) / 2; BartlettWindow<<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0, cuda_stream>>>(size, N, M, output); } CHECK_CUDA_LAUNCH_SUCCESS(); } template CUDA_LIB_EXPORT cudaError_t CalBartlettWindow<int, half>(const size_t size, const int *input, const bool periodic, half *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalBartlettWindow<int64_t, half>(const size_t size, const int64_t *input, const bool periodic, half *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalBartlettWindow<int, float>(const size_t size, const int *input, const bool periodic, float *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalBartlettWindow<int64_t, float>(const size_t size, const int64_t *input, const bool periodic, float *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalBartlettWindow<int, double>(const size_t size, const int *input, const bool periodic, double *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalBartlettWindow<int64_t, double>(const size_t size, const int64_t *input, const bool periodic, double *output, const uint32_t &device_id, cudaStream_t cuda_stream);
2c1cfb7e555fa1a58a2b26c9b0acb073e9b07ac6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void gpu_tfm_arb_2dly_hmc(float* real_result,float* imag_result,const int n,const int combs, const float* real_exp,const float* img_exp,const int* transmit,const int* receive,const int* lookup_ind_tx, const int* lookup_ind_rx,const int tot_pix, const int grid_x, const int grid_y, const int grid_z, const float* lookup_amp_tx,const float* lookup_amp_rx,const float* tt_weight){ // get pixel's coordinates int pix = blockIdx.x*blockDim.x+threadIdx.x; if (pix<tot_pix){ //local variable float tot_real = 0, tot_imag = 0; for(int ii = 0; ii < combs; ii++){ float real = 0; float imag = 0; int tx = transmit[ii]-1; int rx = receive[ii]-1; int t_ind = (tx*grid_x*grid_y*grid_z)+pix; int r_ind = (rx*grid_x*grid_y*grid_z)+pix; int index1 = lookup_ind_tx[t_ind] + lookup_ind_rx[r_ind]-1; float amp_corr1 = lookup_amp_tx[t_ind]*lookup_amp_rx[r_ind]*tt_weight[ii]/2; int index2 = lookup_ind_tx[r_ind] + lookup_ind_rx[t_ind]-1; float amp_corr2 = lookup_amp_tx[r_ind]*lookup_amp_rx[t_ind]*tt_weight[ii]/2; if(index1<0){ } else if(index1>n){ } else { int set_val1 = ii*(n)+index1; real = real_exp[set_val1]*amp_corr1; imag = img_exp[set_val1]*amp_corr1; // sum each val tot_real += real; tot_imag += imag; } real = 0; imag = 0; if(index2<0){ } else if(index2>n){ } else { int set_val2 = ii*(n)+index2; real = real_exp[set_val2]*amp_corr2; imag = img_exp[set_val2]*amp_corr2; // sum each val tot_real += real; tot_imag += imag; } } // store the final value for the pixel //result[pix] = sqrt(tot_real*tot_real + tot_imag*tot_imag); real_result[pix] = tot_real; imag_result[pix] = tot_imag; } }
2c1cfb7e555fa1a58a2b26c9b0acb073e9b07ac6.cu
__global__ void gpu_tfm_arb_2dly_hmc(float* real_result,float* imag_result,const int n,const int combs, const float* real_exp,const float* img_exp,const int* transmit,const int* receive,const int* lookup_ind_tx, const int* lookup_ind_rx,const int tot_pix, const int grid_x, const int grid_y, const int grid_z, const float* lookup_amp_tx,const float* lookup_amp_rx,const float* tt_weight){ // get pixel's coordinates int pix = blockIdx.x*blockDim.x+threadIdx.x; if (pix<tot_pix){ //local variable float tot_real = 0, tot_imag = 0; for(int ii = 0; ii < combs; ii++){ float real = 0; float imag = 0; int tx = transmit[ii]-1; int rx = receive[ii]-1; int t_ind = (tx*grid_x*grid_y*grid_z)+pix; int r_ind = (rx*grid_x*grid_y*grid_z)+pix; int index1 = lookup_ind_tx[t_ind] + lookup_ind_rx[r_ind]-1; float amp_corr1 = lookup_amp_tx[t_ind]*lookup_amp_rx[r_ind]*tt_weight[ii]/2; int index2 = lookup_ind_tx[r_ind] + lookup_ind_rx[t_ind]-1; float amp_corr2 = lookup_amp_tx[r_ind]*lookup_amp_rx[t_ind]*tt_weight[ii]/2; if(index1<0){ } else if(index1>n){ } else { int set_val1 = ii*(n)+index1; real = real_exp[set_val1]*amp_corr1; imag = img_exp[set_val1]*amp_corr1; // sum each val tot_real += real; tot_imag += imag; } real = 0; imag = 0; if(index2<0){ } else if(index2>n){ } else { int set_val2 = ii*(n)+index2; real = real_exp[set_val2]*amp_corr2; imag = img_exp[set_val2]*amp_corr2; // sum each val tot_real += real; tot_imag += imag; } } // store the final value for the pixel //result[pix] = sqrt(tot_real*tot_real + tot_imag*tot_imag); real_result[pix] = tot_real; imag_result[pix] = tot_imag; } }
48c3d5eb06f9c7d412c4d2aa915236f95948e4fa.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

//**********************************
//Nathan Durst
//FFT Cuda Program
//December, 5 2016
//**********************************

//This application uses cuda c and implements
// the Cooley-Tukey FFT algorithm to transform
// an array of complex numbers into a data set
// of correlated complex numbers.

#define N 16384
#define PI 3.14159265358979

//kernel function declaration
__global__ void FFT(float * R, float * I, float * xR, float * xI)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    float real = 0, imag = 0;

    //iterate through the entire array for each output index and accumulate
    // the even and odd terms of the real and imaginary parts.
    for (int i = 0; i<(N/2); i++)
    {
        //even
        real += R[i*2] * cos((2*PI*id*(i*2))/N) - I[i*2] * sin((2*PI*id*(i*2))/N);
        imag += R[i*2] * -sin((2*PI*id*(i*2))/N) + I[i*2] * cos((2*PI*id*(i*2))/N);

        //odd
        real += R[i*2+1] * cos((2*PI*id*(i*2+1))/N) - I[i*2+1] * sin((2*PI*id*(i*2+1))/N);
        imag += R[i*2+1] * -sin((2*PI*id*(i*2+1))/N) + I[i*2+1] * cos((2*PI*id*(i*2+1))/N);
    }

    xR[id] = real;
    xI[id] = imag;
}
48c3d5eb06f9c7d412c4d2aa915236f95948e4fa.cu
#include "includes.h" //********************************** //Nathan Durst //FFT Cuda Program //December, 5 2016 //********************************** //This application uses cuda c and implements // the Cooley-Tukey FFT algorithm to transforms // an array of complex numbers into a data set // correlation of complex numbers. #define N 16384 #define PI 3.14 //kernel function declaration __global__ void FFT(float * R, float * I, float * xR, float * xI) { int id = threadIdx.x + blockIdx.x * blockDim.x; float real = 0, imag = 0; //iterate through entire array for each index and calculate even // and odd for real and imaginary numbers. for (int i = 0; i<(N/2); i++) { //even real += R[i] * cos((2*PI*(i*2))/N) - I[i] * sin((2*PI*id*(i*2))/N); imag += R[i] * -sin((2*PI*(i*2))/N) + I[i] * cos((2*PI*id*(i*2))/N); //odd real += R[i] * cos((2*PI*(i*2+1))/N) - I[i] * sin((2*PI*id*(i*2+1))/N); imag += R[i] * -sin((2*PI*(i*2+1))/N) + I[i] * cos((2*PI*id*(i*2+1))/N); } xR[id] = real; xI[id] = imag; }
954733e6d6198345a70f2fcb854d86244192fd88.hip
// !!! This is a file automatically generated by hipify!!!
#include <gdf-arrow/gdf-arrow.h>

int gdf_arrow_cuda_last_error() {
    return hipGetLastError();
}

const char * gdf_arrow_cuda_error_string(int cuda_error) {
    return hipGetErrorString((hipError_t)cuda_error);
}

const char * gdf_arrow_cuda_error_name(int cuda_error) {
    return hipGetErrorName((hipError_t)cuda_error);
}
954733e6d6198345a70f2fcb854d86244192fd88.cu
#include <gdf-arrow/gdf-arrow.h>

int gdf_arrow_cuda_last_error() {
    return cudaGetLastError();
}

const char * gdf_arrow_cuda_error_string(int cuda_error) {
    return cudaGetErrorString((cudaError_t)cuda_error);
}

const char * gdf_arrow_cuda_error_name(int cuda_error) {
    return cudaGetErrorName((cudaError_t)cuda_error);
}
fc41b5af48fc8fceaf5ef9988cdb6b3142669439.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>

#define N 16

__global__ void add(int* a, int* b, int* c)
{
    int localIdx = blockIdx.x*blockDim.x + threadIdx.x;

    if(localIdx < N)
    {
        c[localIdx] = a[localIdx] + b[localIdx];
    }
}

int main( int argc, char** argv )
{
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    // Initialize arrays a and b with data
    for (int i=0; i < N; i++)
    {
        a[i] = 2*i;
        b[i] = -i;
    }

    // Allocate memory on the GPU
    hipMalloc( (void**)&dev_a, N * sizeof(int) );
    hipMalloc( (void**)&dev_b, N * sizeof(int) );
    hipMalloc( (void**)&dev_c, N * sizeof(int) );

    // Copy the data from host to GPU memory
    hipMemcpy( dev_a, a, N * sizeof(int), hipMemcpyHostToDevice );
    hipMemcpy( dev_b, b, N * sizeof(int), hipMemcpyHostToDevice );

    // Compute the number of blocks necessary based on a constant number of threads per block
    // (the block size must stay within the device limit of 1024 threads per block).
    // Be careful - this can launch more threads than we need, we need to handle this in the kernel!
    int threadsPerBlock = 256;
    int blocks = (int)ceil((float)N/threadsPerBlock);

    // Launch the kernel
    hipLaunchKernelGGL(( add), dim3(blocks),dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_c);

    // Move the result back from the GPU to the host
    hipMemcpy( c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost );

    for (int i=0; i < N; i++)
    {
        printf( "%d + %d = %d\n", a[i], b[i], c[i] );
    }

    // Always free the memory you explicitly allocated
    hipFree( dev_a );
    hipFree( dev_b );
    hipFree( dev_c );

    return 0;
}
fc41b5af48fc8fceaf5ef9988cdb6b3142669439.cu
#include <stdio.h>
#include <math.h>

#define N 16

__global__ void add(int* a, int* b, int* c)
{
    int localIdx = blockIdx.x*blockDim.x + threadIdx.x;

    if(localIdx < N)
    {
        c[localIdx] = a[localIdx] + b[localIdx];
    }
}

int main( int argc, char** argv )
{
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    // Initialize arrays a and b with data
    for (int i=0; i < N; i++)
    {
        a[i] = 2*i;
        b[i] = -i;
    }

    // Allocate memory on the GPU
    cudaMalloc( (void**)&dev_a, N * sizeof(int) );
    cudaMalloc( (void**)&dev_b, N * sizeof(int) );
    cudaMalloc( (void**)&dev_c, N * sizeof(int) );

    // Copy the data from host to GPU memory
    cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice );

    // Compute the number of blocks necessary based on a constant number of threads per block
    // (the block size must stay within the device limit of 1024 threads per block).
    // Be careful - this can launch more threads than we need, we need to handle this in the kernel!
    int threadsPerBlock = 256;
    int blocks = (int)ceil((float)N/threadsPerBlock);

    // Launch the kernel
    add<<<blocks,threadsPerBlock>>>(dev_a, dev_b, dev_c);

    // Move the result back from the GPU to the host
    cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost );

    for (int i=0; i < N; i++)
    {
        printf( "%d + %d = %d\n", a[i], b[i], c[i] );
    }

    // Always free the memory you explicitly allocated
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_c );

    return 0;
}
3c3374aff7ec5fba493a5f2d46f8db3980f439bf.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "taco2ProjectionKernel.h" #include "hip/hip_runtime.h" #include <stdexcept> #include <string> using namespace tts; namespace nvinfer1 { namespace plugin { /****************************************************************************** * CONSTANTS ****************************************************************** *****************************************************************************/ namespace { constexpr const int PROJECTION_COL_SIZE = 512; constexpr const int WARP_SIZE = 32; } // namespace /****************************************************************************** * KERNELS ******************************************************************** *****************************************************************************/ template <typename T, int NUM_THREADS> __device__ inline T warpSum(T const initVal) { constexpr const uint32_t mask = 0xffffffff >> (WARP_SIZE - NUM_THREADS); T val = initVal; #pragma unroll for (int d = NUM_THREADS / 2; d > 0; d /= 2) { val += __shfl_down_sync(mask, val, d, NUM_THREADS); } return val; } template <typename T, int BLOCK_SIZE> __device__ T cooperativeSum(T const initVal, T* const buffer) { // first all warps reduce to single value assert(BLOCK_SIZE % WARP_SIZE == 0); assert(BLOCK_SIZE <= WARP_SIZE * WARP_SIZE); T val = warpSum<T, WARP_SIZE>(initVal); if (threadIdx.x % WARP_SIZE == 0) { buffer[threadIdx.x / WARP_SIZE] = val; } __syncthreads(); if (threadIdx.x < (BLOCK_SIZE / WARP_SIZE)) { val = warpSum<T, BLOCK_SIZE / WARP_SIZE>(buffer[threadIdx.x]); } return val; } __device__ inline void sumReduce(float* const array, const int len) { for (int d = 1; d < blockDim.x; d *= 2) { if (threadIdx.x % (d * 2) == 0 && threadIdx.x + d < len) { array[threadIdx.x] += array[threadIdx.x + d]; } __syncthreads(); } } template <int INPUT_1_LENGTH, int INPUT_2_LENGTH> __global__ void projectionKernel(const float* const weights, const float* const bias, const float* const input1, const float* const input2, 
float* const output) { __shared__ float shared[PROJECTION_COL_SIZE]; // perform mat vec float v = 0.0f; constexpr const int inputLength = INPUT_1_LENGTH + INPUT_2_LENGTH; for (int col = threadIdx.x; col < INPUT_1_LENGTH; col += PROJECTION_COL_SIZE) { // load chunk if (col < INPUT_1_LENGTH) { v += input1[col] * weights[blockIdx.x * inputLength + col]; } } for (int col = threadIdx.x; col < INPUT_2_LENGTH; col += PROJECTION_COL_SIZE) { // load chunk if (col < INPUT_2_LENGTH) { v += input2[col] * weights[blockIdx.x * inputLength + (col + INPUT_1_LENGTH)]; } } v = cooperativeSum<float, PROJECTION_COL_SIZE>(v, shared); // add bias and write if (threadIdx.x == 0) { output[blockIdx.x] = bias[blockIdx.x] + v; } } /****************************************************************************** * CONSTRUCTORS / DESTRUCTOR ************************************************** *****************************************************************************/ Taco2ProjectionKernel::Taco2ProjectionKernel(const std::vector<float>& fcWeightsHost, const std::vector<float>& fcBiasHost, const int input1Length, const int input2Length, const int numDimension) : mInput1Length(input1Length) , mInput2Length(input2Length) , mInputLength(input1Length + input2Length) , mNumDimension(numDimension) , mWeightsDevice() , mBiasDevice() { const size_t numExpectedWeights = mInputLength * mNumDimension; const size_t numExpectedBias = mNumDimension; if (numExpectedWeights != fcWeightsHost.size()) { throw std::runtime_error("Expected " + std::to_string(numExpectedWeights) + " weights for FC but got " + std::to_string(fcWeightsHost.size()) + " instead."); } if (numExpectedBias != fcBiasHost.size()) { throw std::runtime_error("Expected " + std::to_string(numExpectedBias) + " biases for FC but got " + std::to_string(fcBiasHost.size()) + " instead."); } // copy up weights to GPU in row major and concatenated mWeightsDevice = CudaMemory<float>(fcWeightsHost); mBiasDevice = CudaMemory<float>(fcBiasHost); } /****************************************************************************** * PUBLIC METHODS ************************************************************* *****************************************************************************/ void Taco2ProjectionKernel::execute( const float* input1Device, const float* input2Device, float* outputDevice, hipStream_t stream) { const dim3 grid(mNumDimension); const dim3 block(PROJECTION_COL_SIZE); if (mInput1Length != 1024) { throw std::runtime_error( "Plugin is configured to only handle hidden " "input length of 1024, but got " + std::to_string(mInput1Length)); } if (mInput2Length != 512) { throw std::runtime_error( "Plugin is configured to only handle context " "input length of 512, but got " + std::to_string(mInput1Length)); } hipLaunchKernelGGL(( projectionKernel<1024, 512>), dim3(grid), dim3(block), 0, stream, mWeightsDevice.data(), mBiasDevice.data(), input1Device, input2Device, outputDevice); } } // namespace plugin } // namespace nvinfer1
3c3374aff7ec5fba493a5f2d46f8db3980f439bf.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "taco2ProjectionKernel.h" #include "cuda_runtime.h" #include <stdexcept> #include <string> using namespace tts; namespace nvinfer1 { namespace plugin { /****************************************************************************** * CONSTANTS ****************************************************************** *****************************************************************************/ namespace { constexpr const int PROJECTION_COL_SIZE = 512; constexpr const int WARP_SIZE = 32; } // namespace /****************************************************************************** * KERNELS ******************************************************************** *****************************************************************************/ template <typename T, int NUM_THREADS> __device__ inline T warpSum(T const initVal) { constexpr const uint32_t mask = 0xffffffff >> (WARP_SIZE - NUM_THREADS); T val = initVal; #pragma unroll for (int d = NUM_THREADS / 2; d > 0; d /= 2) { val += __shfl_down_sync(mask, val, d, NUM_THREADS); } return val; } template <typename T, int BLOCK_SIZE> __device__ T cooperativeSum(T const initVal, T* const buffer) { // first all warps reduce to single value assert(BLOCK_SIZE % WARP_SIZE == 0); assert(BLOCK_SIZE <= WARP_SIZE * WARP_SIZE); T val = warpSum<T, WARP_SIZE>(initVal); if (threadIdx.x % WARP_SIZE == 0) { buffer[threadIdx.x / WARP_SIZE] = val; } __syncthreads(); if (threadIdx.x < (BLOCK_SIZE / WARP_SIZE)) { val = warpSum<T, BLOCK_SIZE / WARP_SIZE>(buffer[threadIdx.x]); } return val; } __device__ inline void sumReduce(float* const array, const int len) { for (int d = 1; d < blockDim.x; d *= 2) { if (threadIdx.x % (d * 2) == 0 && threadIdx.x + d < len) { array[threadIdx.x] += array[threadIdx.x + d]; } __syncthreads(); } } template <int INPUT_1_LENGTH, int INPUT_2_LENGTH> __global__ void projectionKernel(const float* const weights, const float* const bias, const float* const input1, const float* const input2, float* const output) { __shared__ float 
shared[PROJECTION_COL_SIZE]; // perform mat vec float v = 0.0f; constexpr const int inputLength = INPUT_1_LENGTH + INPUT_2_LENGTH; for (int col = threadIdx.x; col < INPUT_1_LENGTH; col += PROJECTION_COL_SIZE) { // load chunk if (col < INPUT_1_LENGTH) { v += input1[col] * weights[blockIdx.x * inputLength + col]; } } for (int col = threadIdx.x; col < INPUT_2_LENGTH; col += PROJECTION_COL_SIZE) { // load chunk if (col < INPUT_2_LENGTH) { v += input2[col] * weights[blockIdx.x * inputLength + (col + INPUT_1_LENGTH)]; } } v = cooperativeSum<float, PROJECTION_COL_SIZE>(v, shared); // add bias and write if (threadIdx.x == 0) { output[blockIdx.x] = bias[blockIdx.x] + v; } } /****************************************************************************** * CONSTRUCTORS / DESTRUCTOR ************************************************** *****************************************************************************/ Taco2ProjectionKernel::Taco2ProjectionKernel(const std::vector<float>& fcWeightsHost, const std::vector<float>& fcBiasHost, const int input1Length, const int input2Length, const int numDimension) : mInput1Length(input1Length) , mInput2Length(input2Length) , mInputLength(input1Length + input2Length) , mNumDimension(numDimension) , mWeightsDevice() , mBiasDevice() { const size_t numExpectedWeights = mInputLength * mNumDimension; const size_t numExpectedBias = mNumDimension; if (numExpectedWeights != fcWeightsHost.size()) { throw std::runtime_error("Expected " + std::to_string(numExpectedWeights) + " weights for FC but got " + std::to_string(fcWeightsHost.size()) + " instead."); } if (numExpectedBias != fcBiasHost.size()) { throw std::runtime_error("Expected " + std::to_string(numExpectedBias) + " biases for FC but got " + std::to_string(fcBiasHost.size()) + " instead."); } // copy up weights to GPU in row major and concatenated mWeightsDevice = CudaMemory<float>(fcWeightsHost); mBiasDevice = CudaMemory<float>(fcBiasHost); } /****************************************************************************** * PUBLIC METHODS ************************************************************* *****************************************************************************/ void Taco2ProjectionKernel::execute( const float* input1Device, const float* input2Device, float* outputDevice, cudaStream_t stream) { const dim3 grid(mNumDimension); const dim3 block(PROJECTION_COL_SIZE); if (mInput1Length != 1024) { throw std::runtime_error( "Plugin is configured to only handle hidden " "input length of 1024, but got " + std::to_string(mInput1Length)); } if (mInput2Length != 512) { throw std::runtime_error( "Plugin is configured to only handle context " "input length of 512, but got " + std::to_string(mInput1Length)); } projectionKernel<1024, 512><<<grid, block, 0, stream>>>( mWeightsDevice.data(), mBiasDevice.data(), input1Device, input2Device, outputDevice); } } // namespace plugin } // namespace nvinfer1
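The projection kernel above folds each thread's partial dot product with __shfl_down_sync inside warpSum; the stand-alone demo below, written for this note and not part of the plugin, shows the same warp-shuffle reduction summing the 32 lane ids of a single warp (expected result 496).

#include <cstdio>
#include <cuda_runtime.h>

// One warp reduces its lane ids; lane 0 ends up holding 0+1+...+31 = 496.
__global__ void warp_sum_demo(int *out)
{
    int val = threadIdx.x;
    for (int d = 16; d > 0; d /= 2) {
        val += __shfl_down_sync(0xffffffffu, val, d, 32);
    }
    if (threadIdx.x == 0) *out = val;
}

int main()
{
    int *d_out, h_out = 0;
    cudaMalloc(&d_out, sizeof(int));
    warp_sum_demo<<<1, 32>>>(d_out);
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    std::printf("warp sum = %d (expected 496)\n", h_out);
    cudaFree(d_out);
    return 0;
}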
23d4341aeeee7394431c885e13c99aa5a137d7b2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <io/parquet/parquet_gpu.hpp> #include <io/utilities/block_utils.cuh> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <hipcub/hipcub.hpp> namespace cudf { namespace io { namespace parquet { namespace gpu { struct dict_state_s { uint32_t row_cnt; PageFragment *cur_fragment; uint32_t *hashmap; uint32_t total_dict_entries; //!< Total number of entries in dictionary uint32_t dictionary_size; //!< Total dictionary size in bytes uint32_t num_dict_entries; //!< Dictionary entries in current fragment to add uint32_t frag_dict_size; EncColumnChunk ck; EncColumnDesc col; PageFragment frag; volatile uint32_t scratch_red[32]; uint16_t frag_dict[max_page_fragment_size]; }; /** * @brief Computes a 16-bit dictionary hash */ inline __device__ uint32_t uint32_hash16(uint32_t v) { return (v + (v >> 16)) & 0xffff; } inline __device__ uint32_t uint64_hash16(uint64_t v) { return uint32_hash16((uint32_t)(v + (v >> 32))); } inline __device__ uint32_t nvstr_hash16(const uint8_t *p, uint32_t len) { uint32_t hash = len; if (len > 0) { uint32_t align_p = 3 & reinterpret_cast<uintptr_t>(p); const uint32_t *p32 = reinterpret_cast<const uint32_t *>(p - align_p); uint32_t ofs = align_p * 8; uint32_t v; while (len > 4) { v = *p32++; if (ofs) { v = __funnelshift_r(v, *p32, ofs); } hash = __funnelshift_l(hash, hash, 5) + v; len -= 4; } v = *p32; if (ofs) { v = __funnelshift_r(v, (align_p + len > 4) ? 
p32[1] : 0, ofs); } v &= ((2 << (len * 8 - 1)) - 1); hash = __funnelshift_l(hash, hash, 5) + v; } return uint32_hash16(hash); } /** * @brief Fetch a page fragment and its dictionary entries in row-ascending order * * @param[in,out] s dictionary state * @param[in,out] dict_data fragment dictionary data for the current column (zeroed out after *fetching) * @param[in] frag_start_row row position of current fragment * @param[in] t thread id */ __device__ void FetchDictionaryFragment(dict_state_s *s, uint32_t *dict_data, uint32_t frag_start_row, uint32_t t) { if (t == 0) s->frag = *s->cur_fragment; __syncthreads(); // Store the row values in shared mem and set the corresponding dict_data to zero (end-of-list) // It's easiest to do this here since we're only dealing with values all within a 5K-row window for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) { uint32_t r = dict_data[frag_start_row + i] - frag_start_row; s->frag_dict[i] = r; } __syncthreads(); for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) { uint32_t r = s->frag_dict[i]; dict_data[frag_start_row + r] = 0; } __syncthreads(); } /// Generate dictionary indices in ascending row order __device__ void GenerateDictionaryIndices(dict_state_s *s, uint32_t t) { uint32_t *dict_index = s->col.dict_index; uint32_t *dict_data = s->col.dict_data + s->ck.start_row; const uint32_t *valid_map = s->col.valid_map_base; uint32_t num_dict_entries = 0; for (uint32_t i = 0; i < s->row_cnt; i += 1024) { uint32_t row = s->ck.start_row + i + t; uint32_t is_valid = (i + t < s->row_cnt && row < s->col.num_rows) ? (valid_map) ? (valid_map[row >> 5] >> (row & 0x1f)) & 1 : 1 : 0; uint32_t dict_idx = (is_valid) ? dict_index[row] : 0; uint32_t is_unique = (is_valid && dict_idx == row); // Any value that doesn't have bit31 set should have dict_idx=row at this point uint32_t umask = ballot(is_unique); uint32_t pos = num_dict_entries + __popc(umask & ((1 << (t & 0x1f)) - 1)); if (!(t & 0x1f)) { s->scratch_red[t >> 5] = __popc(umask); } num_dict_entries += __syncthreads_count(is_unique); if (t < 32) { s->scratch_red[t] = WarpReducePos32(s->scratch_red[t], t); } __syncthreads(); if (t >= 32) { pos += s->scratch_red[(t - 32) >> 5]; } if (is_valid && is_unique) { dict_data[pos] = row; dict_index[row] = pos; } __syncthreads(); if (is_valid && !is_unique) { // NOTE: Should have at most 3 iterations (once for early duplicate elimination, once for // final dictionary duplicate elimination and once for re-ordering) (If something went wrong // building the dictionary, it will likely hang or crash right here) do { dict_idx = dict_index[dict_idx & 0x7fffffff]; } while (dict_idx > 0x7fffffff); dict_index[row] = dict_idx; } } } // blockDim(1024, 1, 1) template <int block_size> __global__ void __launch_bounds__(block_size, 1) gpuBuildChunkDictionaries(EncColumnChunk *chunks, uint32_t *dev_scratch) { __shared__ __align__(8) dict_state_s state_g; using warp_reduce = hipcub::WarpReduce<uint32_t>; __shared__ typename warp_reduce::TempStorage temp_storage[block_size / 32]; dict_state_s *const s = &state_g; uint32_t t = threadIdx.x; uint32_t dtype, dtype_len, dtype_len_in; if (t == 0) s->ck = chunks[blockIdx.x]; __syncthreads(); if (!s->ck.has_dictionary) { return; } if (t == 0) s->col = *s->ck.col_desc; __syncthreads(); if (!t) { s->hashmap = dev_scratch + s->ck.dictionary_id * (size_t)(1 << kDictHashBits); s->row_cnt = 0; s->cur_fragment = s->ck.fragments; s->total_dict_entries = 0; s->dictionary_size = 0; s->ck.num_dict_fragments = 0; } dtype = s->col.physical_type; 
dtype_len = (dtype == INT96) ? 12 : (dtype == INT64 || dtype == DOUBLE) ? 8 : 4; if (dtype == INT32) { dtype_len_in = GetDtypeLogicalLen(s->col.converted_type); } else if (dtype == INT96) { dtype_len_in = 8; } else { dtype_len_in = (dtype == BYTE_ARRAY) ? sizeof(nvstrdesc_s) : dtype_len; } __syncthreads(); while (s->row_cnt < s->ck.num_rows) { uint32_t frag_start_row = s->ck.start_row + s->row_cnt, num_dict_entries, frag_dict_size; FetchDictionaryFragment(s, s->col.dict_data, frag_start_row, t); __syncthreads(); num_dict_entries = s->frag.num_dict_vals; if (!t) { s->num_dict_entries = 0; s->frag_dict_size = 0; } for (uint32_t i = 0; i < num_dict_entries; i += 1024) { bool is_valid = (i + t < num_dict_entries); uint32_t len = 0; uint32_t is_dupe = 0; uint32_t row, hash, next, *next_addr; uint32_t new_dict_entries; if (is_valid) { row = frag_start_row + s->frag_dict[i + t]; len = dtype_len; if (dtype == BYTE_ARRAY) { const char *ptr = static_cast<const nvstrdesc_s *>(s->col.column_data_base)[row].ptr; uint32_t count = static_cast<uint32_t>( static_cast<const nvstrdesc_s *>(s->col.column_data_base)[row].count); len += count; hash = nvstr_hash16(reinterpret_cast<const uint8_t *>(ptr), count); // Walk the list of rows with the same hash next_addr = &s->hashmap[hash]; while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) { const char *ptr2 = static_cast<const nvstrdesc_s *>(s->col.column_data_base)[next - 1].ptr; uint32_t count2 = static_cast<const nvstrdesc_s *>(s->col.column_data_base)[next - 1].count; if (count2 == count && nvstr_is_equal(ptr, count, ptr2, count2)) { is_dupe = 1; break; } next_addr = &s->col.dict_data[next - 1]; } } else { uint64_t val; if (dtype_len_in == 8) { val = static_cast<const uint64_t *>(s->col.column_data_base)[row]; hash = uint64_hash16(val); } else { val = (dtype_len_in == 4) ? static_cast<const uint32_t *>(s->col.column_data_base)[row] : (dtype_len_in == 2) ? static_cast<const uint16_t *>(s->col.column_data_base)[row] : static_cast<const uint8_t *>(s->col.column_data_base)[row]; hash = uint32_hash16(val); } // Walk the list of rows with the same hash next_addr = &s->hashmap[hash]; while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) { uint64_t val2 = (dtype_len_in == 8) ? static_cast<const uint64_t *>(s->col.column_data_base)[next - 1] : (dtype_len_in == 4) ? static_cast<const uint32_t *>(s->col.column_data_base)[next - 1] : (dtype_len_in == 2) ? static_cast<const uint16_t *>(s->col.column_data_base)[next - 1] : static_cast<const uint8_t *>(s->col.column_data_base)[next - 1]; if (val2 == val) { is_dupe = 1; break; } next_addr = &s->col.dict_data[next - 1]; } } } // Count the non-duplicate entries frag_dict_size = warp_reduce(temp_storage[t / 32]).Sum((is_valid && !is_dupe) ? 
len : 0); if (!(t & 0x1f)) { s->scratch_red[t >> 5] = frag_dict_size; } new_dict_entries = __syncthreads_count(is_valid && !is_dupe); if (t < 32) { frag_dict_size = warp_reduce(temp_storage[t / 32]).Sum(s->scratch_red[t]); if (t == 0) { s->frag_dict_size += frag_dict_size; s->num_dict_entries += new_dict_entries; } } if (is_valid) { if (!is_dupe) { s->col.dict_index[row] = row; } else { s->col.dict_index[row] = (next - 1) | (1u << 31); } } __syncthreads(); // At this point, the dictionary order is non-deterministic, and we want insertion order // Make sure that the non-duplicate entry corresponds to the lower row number // (The entry in dict_data (next-1) used for duplicate elimination does not need // to be the lowest row number) bool reorder_check = (is_valid && is_dupe && next - 1 > row); if (reorder_check) { next = s->col.dict_index[next - 1]; while (next & (1u << 31)) { next = s->col.dict_index[next & 0x7fffffff]; } } if (__syncthreads_or(reorder_check)) { if (reorder_check) { atomicMin(&s->col.dict_index[next], row); } __syncthreads(); if (reorder_check && s->col.dict_index[next] == row) { s->col.dict_index[next] = row | (1u << 31); s->col.dict_index[row] = row; } __syncthreads(); } } __syncthreads(); num_dict_entries = s->num_dict_entries; frag_dict_size = s->frag_dict_size; if (s->total_dict_entries + num_dict_entries > 65536 || (s->dictionary_size != 0 && s->dictionary_size + frag_dict_size > 512 * 1024)) { break; } __syncthreads(); if (!t) { if (num_dict_entries != s->frag.num_dict_vals) { s->cur_fragment->num_dict_vals = num_dict_entries; } if (frag_dict_size != s->frag.dict_data_size) { s->frag.dict_data_size = frag_dict_size; } s->total_dict_entries += num_dict_entries; s->dictionary_size += frag_dict_size; s->row_cnt += s->frag.num_rows; s->cur_fragment++; s->ck.num_dict_fragments++; } __syncthreads(); } __syncthreads(); GenerateDictionaryIndices(s, t); if (!t) { chunks[blockIdx.x].num_dict_fragments = s->ck.num_dict_fragments; chunks[blockIdx.x].dictionary_size = s->dictionary_size; chunks[blockIdx.x].total_dict_entries = s->total_dict_entries; } } /** * @brief Launches kernel for building chunk dictionaries * * @param[in,out] chunks Column chunks * @param[in] dev_scratch Device scratch data (kDictScratchSize per dictionary) * @param[in] num_chunks Number of column chunks * @param[in] stream CUDA stream to use, default 0 */ void BuildChunkDictionaries(EncColumnChunk *chunks, uint32_t *dev_scratch, size_t scratch_size, uint32_t num_chunks, rmm::cuda_stream_view stream) { if (num_chunks > 0 && scratch_size > 0) { // zero scratch size implies no dictionaries CUDA_TRY(hipMemsetAsync(dev_scratch, 0, scratch_size, stream.value())); hipLaunchKernelGGL(( gpuBuildChunkDictionaries<1024>), dim3(num_chunks), dim3(1024), 0, stream.value(), chunks, dev_scratch); } } } // namespace gpu } // namespace parquet } // namespace io } // namespace cudf
23d4341aeeee7394431c885e13c99aa5a137d7b2.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <io/parquet/parquet_gpu.hpp> #include <io/utilities/block_utils.cuh> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <cub/cub.cuh> namespace cudf { namespace io { namespace parquet { namespace gpu { struct dict_state_s { uint32_t row_cnt; PageFragment *cur_fragment; uint32_t *hashmap; uint32_t total_dict_entries; //!< Total number of entries in dictionary uint32_t dictionary_size; //!< Total dictionary size in bytes uint32_t num_dict_entries; //!< Dictionary entries in current fragment to add uint32_t frag_dict_size; EncColumnChunk ck; EncColumnDesc col; PageFragment frag; volatile uint32_t scratch_red[32]; uint16_t frag_dict[max_page_fragment_size]; }; /** * @brief Computes a 16-bit dictionary hash */ inline __device__ uint32_t uint32_hash16(uint32_t v) { return (v + (v >> 16)) & 0xffff; } inline __device__ uint32_t uint64_hash16(uint64_t v) { return uint32_hash16((uint32_t)(v + (v >> 32))); } inline __device__ uint32_t nvstr_hash16(const uint8_t *p, uint32_t len) { uint32_t hash = len; if (len > 0) { uint32_t align_p = 3 & reinterpret_cast<uintptr_t>(p); const uint32_t *p32 = reinterpret_cast<const uint32_t *>(p - align_p); uint32_t ofs = align_p * 8; uint32_t v; while (len > 4) { v = *p32++; if (ofs) { v = __funnelshift_r(v, *p32, ofs); } hash = __funnelshift_l(hash, hash, 5) + v; len -= 4; } v = *p32; if (ofs) { v = __funnelshift_r(v, (align_p + len > 4) ? 
p32[1] : 0, ofs); } v &= ((2 << (len * 8 - 1)) - 1); hash = __funnelshift_l(hash, hash, 5) + v; } return uint32_hash16(hash); } /** * @brief Fetch a page fragment and its dictionary entries in row-ascending order * * @param[in,out] s dictionary state * @param[in,out] dict_data fragment dictionary data for the current column (zeroed out after *fetching) * @param[in] frag_start_row row position of current fragment * @param[in] t thread id */ __device__ void FetchDictionaryFragment(dict_state_s *s, uint32_t *dict_data, uint32_t frag_start_row, uint32_t t) { if (t == 0) s->frag = *s->cur_fragment; __syncthreads(); // Store the row values in shared mem and set the corresponding dict_data to zero (end-of-list) // It's easiest to do this here since we're only dealing with values all within a 5K-row window for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) { uint32_t r = dict_data[frag_start_row + i] - frag_start_row; s->frag_dict[i] = r; } __syncthreads(); for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) { uint32_t r = s->frag_dict[i]; dict_data[frag_start_row + r] = 0; } __syncthreads(); } /// Generate dictionary indices in ascending row order __device__ void GenerateDictionaryIndices(dict_state_s *s, uint32_t t) { uint32_t *dict_index = s->col.dict_index; uint32_t *dict_data = s->col.dict_data + s->ck.start_row; const uint32_t *valid_map = s->col.valid_map_base; uint32_t num_dict_entries = 0; for (uint32_t i = 0; i < s->row_cnt; i += 1024) { uint32_t row = s->ck.start_row + i + t; uint32_t is_valid = (i + t < s->row_cnt && row < s->col.num_rows) ? (valid_map) ? (valid_map[row >> 5] >> (row & 0x1f)) & 1 : 1 : 0; uint32_t dict_idx = (is_valid) ? dict_index[row] : 0; uint32_t is_unique = (is_valid && dict_idx == row); // Any value that doesn't have bit31 set should have dict_idx=row at this point uint32_t umask = ballot(is_unique); uint32_t pos = num_dict_entries + __popc(umask & ((1 << (t & 0x1f)) - 1)); if (!(t & 0x1f)) { s->scratch_red[t >> 5] = __popc(umask); } num_dict_entries += __syncthreads_count(is_unique); if (t < 32) { s->scratch_red[t] = WarpReducePos32(s->scratch_red[t], t); } __syncthreads(); if (t >= 32) { pos += s->scratch_red[(t - 32) >> 5]; } if (is_valid && is_unique) { dict_data[pos] = row; dict_index[row] = pos; } __syncthreads(); if (is_valid && !is_unique) { // NOTE: Should have at most 3 iterations (once for early duplicate elimination, once for // final dictionary duplicate elimination and once for re-ordering) (If something went wrong // building the dictionary, it will likely hang or crash right here) do { dict_idx = dict_index[dict_idx & 0x7fffffff]; } while (dict_idx > 0x7fffffff); dict_index[row] = dict_idx; } } } // blockDim(1024, 1, 1) template <int block_size> __global__ void __launch_bounds__(block_size, 1) gpuBuildChunkDictionaries(EncColumnChunk *chunks, uint32_t *dev_scratch) { __shared__ __align__(8) dict_state_s state_g; using warp_reduce = cub::WarpReduce<uint32_t>; __shared__ typename warp_reduce::TempStorage temp_storage[block_size / 32]; dict_state_s *const s = &state_g; uint32_t t = threadIdx.x; uint32_t dtype, dtype_len, dtype_len_in; if (t == 0) s->ck = chunks[blockIdx.x]; __syncthreads(); if (!s->ck.has_dictionary) { return; } if (t == 0) s->col = *s->ck.col_desc; __syncthreads(); if (!t) { s->hashmap = dev_scratch + s->ck.dictionary_id * (size_t)(1 << kDictHashBits); s->row_cnt = 0; s->cur_fragment = s->ck.fragments; s->total_dict_entries = 0; s->dictionary_size = 0; s->ck.num_dict_fragments = 0; } dtype = s->col.physical_type; 
dtype_len = (dtype == INT96) ? 12 : (dtype == INT64 || dtype == DOUBLE) ? 8 : 4; if (dtype == INT32) { dtype_len_in = GetDtypeLogicalLen(s->col.converted_type); } else if (dtype == INT96) { dtype_len_in = 8; } else { dtype_len_in = (dtype == BYTE_ARRAY) ? sizeof(nvstrdesc_s) : dtype_len; } __syncthreads(); while (s->row_cnt < s->ck.num_rows) { uint32_t frag_start_row = s->ck.start_row + s->row_cnt, num_dict_entries, frag_dict_size; FetchDictionaryFragment(s, s->col.dict_data, frag_start_row, t); __syncthreads(); num_dict_entries = s->frag.num_dict_vals; if (!t) { s->num_dict_entries = 0; s->frag_dict_size = 0; } for (uint32_t i = 0; i < num_dict_entries; i += 1024) { bool is_valid = (i + t < num_dict_entries); uint32_t len = 0; uint32_t is_dupe = 0; uint32_t row, hash, next, *next_addr; uint32_t new_dict_entries; if (is_valid) { row = frag_start_row + s->frag_dict[i + t]; len = dtype_len; if (dtype == BYTE_ARRAY) { const char *ptr = static_cast<const nvstrdesc_s *>(s->col.column_data_base)[row].ptr; uint32_t count = static_cast<uint32_t>( static_cast<const nvstrdesc_s *>(s->col.column_data_base)[row].count); len += count; hash = nvstr_hash16(reinterpret_cast<const uint8_t *>(ptr), count); // Walk the list of rows with the same hash next_addr = &s->hashmap[hash]; while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) { const char *ptr2 = static_cast<const nvstrdesc_s *>(s->col.column_data_base)[next - 1].ptr; uint32_t count2 = static_cast<const nvstrdesc_s *>(s->col.column_data_base)[next - 1].count; if (count2 == count && nvstr_is_equal(ptr, count, ptr2, count2)) { is_dupe = 1; break; } next_addr = &s->col.dict_data[next - 1]; } } else { uint64_t val; if (dtype_len_in == 8) { val = static_cast<const uint64_t *>(s->col.column_data_base)[row]; hash = uint64_hash16(val); } else { val = (dtype_len_in == 4) ? static_cast<const uint32_t *>(s->col.column_data_base)[row] : (dtype_len_in == 2) ? static_cast<const uint16_t *>(s->col.column_data_base)[row] : static_cast<const uint8_t *>(s->col.column_data_base)[row]; hash = uint32_hash16(val); } // Walk the list of rows with the same hash next_addr = &s->hashmap[hash]; while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) { uint64_t val2 = (dtype_len_in == 8) ? static_cast<const uint64_t *>(s->col.column_data_base)[next - 1] : (dtype_len_in == 4) ? static_cast<const uint32_t *>(s->col.column_data_base)[next - 1] : (dtype_len_in == 2) ? static_cast<const uint16_t *>(s->col.column_data_base)[next - 1] : static_cast<const uint8_t *>(s->col.column_data_base)[next - 1]; if (val2 == val) { is_dupe = 1; break; } next_addr = &s->col.dict_data[next - 1]; } } } // Count the non-duplicate entries frag_dict_size = warp_reduce(temp_storage[t / 32]).Sum((is_valid && !is_dupe) ? 
len : 0); if (!(t & 0x1f)) { s->scratch_red[t >> 5] = frag_dict_size; } new_dict_entries = __syncthreads_count(is_valid && !is_dupe); if (t < 32) { frag_dict_size = warp_reduce(temp_storage[t / 32]).Sum(s->scratch_red[t]); if (t == 0) { s->frag_dict_size += frag_dict_size; s->num_dict_entries += new_dict_entries; } } if (is_valid) { if (!is_dupe) { s->col.dict_index[row] = row; } else { s->col.dict_index[row] = (next - 1) | (1u << 31); } } __syncthreads(); // At this point, the dictionary order is non-deterministic, and we want insertion order // Make sure that the non-duplicate entry corresponds to the lower row number // (The entry in dict_data (next-1) used for duplicate elimination does not need // to be the lowest row number) bool reorder_check = (is_valid && is_dupe && next - 1 > row); if (reorder_check) { next = s->col.dict_index[next - 1]; while (next & (1u << 31)) { next = s->col.dict_index[next & 0x7fffffff]; } } if (__syncthreads_or(reorder_check)) { if (reorder_check) { atomicMin(&s->col.dict_index[next], row); } __syncthreads(); if (reorder_check && s->col.dict_index[next] == row) { s->col.dict_index[next] = row | (1u << 31); s->col.dict_index[row] = row; } __syncthreads(); } } __syncthreads(); num_dict_entries = s->num_dict_entries; frag_dict_size = s->frag_dict_size; if (s->total_dict_entries + num_dict_entries > 65536 || (s->dictionary_size != 0 && s->dictionary_size + frag_dict_size > 512 * 1024)) { break; } __syncthreads(); if (!t) { if (num_dict_entries != s->frag.num_dict_vals) { s->cur_fragment->num_dict_vals = num_dict_entries; } if (frag_dict_size != s->frag.dict_data_size) { s->frag.dict_data_size = frag_dict_size; } s->total_dict_entries += num_dict_entries; s->dictionary_size += frag_dict_size; s->row_cnt += s->frag.num_rows; s->cur_fragment++; s->ck.num_dict_fragments++; } __syncthreads(); } __syncthreads(); GenerateDictionaryIndices(s, t); if (!t) { chunks[blockIdx.x].num_dict_fragments = s->ck.num_dict_fragments; chunks[blockIdx.x].dictionary_size = s->dictionary_size; chunks[blockIdx.x].total_dict_entries = s->total_dict_entries; } } /** * @brief Launches kernel for building chunk dictionaries * * @param[in,out] chunks Column chunks * @param[in] dev_scratch Device scratch data (kDictScratchSize per dictionary) * @param[in] num_chunks Number of column chunks * @param[in] stream CUDA stream to use, default 0 */ void BuildChunkDictionaries(EncColumnChunk *chunks, uint32_t *dev_scratch, size_t scratch_size, uint32_t num_chunks, rmm::cuda_stream_view stream) { if (num_chunks > 0 && scratch_size > 0) { // zero scratch size implies no dictionaries CUDA_TRY(cudaMemsetAsync(dev_scratch, 0, scratch_size, stream.value())); gpuBuildChunkDictionaries<1024><<<num_chunks, 1024, 0, stream.value()>>>(chunks, dev_scratch); } } } // namespace gpu } // namespace parquet } // namespace io } // namespace cudf
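The dictionary builder above routes every value through a 16-bit hash before probing the per-chunk hashmap; the host-only sketch below simply copies the two scalar helpers so the 16-bit range of the slot index is easy to see (the sample inputs are arbitrary, nothing here comes from the original tests).

#include <cstdint>
#include <cstdio>

static uint32_t uint32_hash16(uint32_t v) { return (v + (v >> 16)) & 0xffff; }
static uint32_t uint64_hash16(uint64_t v) { return uint32_hash16((uint32_t)(v + (v >> 32))); }

int main()
{
    // Every result fits in 16 bits.
    std::printf("%04x %04x %04x\n",
                uint32_hash16(42u),
                uint32_hash16(0xdeadbeefu),
                uint64_hash16(0x0123456789abcdefull));
    return 0;
}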
a8cd36335ae7d4eb8c71e8b6f569816d16a826ff.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

using namespace std;

float *valuesf;
float *weightf;
float maxWf;
float *matchf;

const int fSUMFLAG=0;
const int fKNAPSACKFLAG = 1;
const int fAVGFLAG=2;
const int fMATCHFLAG=3;
const int fINVERSESUMFLAG=4;
const int fMAXIMIZE=-1;
const int fMINIMIZE=1;

__global__ void setup_kernelf ( hiprandState_t *state, unsigned long seed )
{
    hiprand_init ( seed, 0, 0, &state[0] );
}
a8cd36335ae7d4eb8c71e8b6f569816d16a826ff.cu
#include "includes.h" using namespace std; float *valuesf; float *weightf; float maxWf; float *matchf; const int fSUMFLAG=0; const int fKNAPSACKFLAG = 1; const int fAVGFLAG=2; const int fMATCHFLAG=3; const int fINVERSESUMFLAG=4; const int fMAXIMIZE=-1; const int fMINIMIZE=1; __global__ void setup_kernelf ( curandState *state, unsigned long seed ) { curand_init ( seed, 0, 0, &state[0] ); }
4432c053193fd2e6229c869310ae1ee45398edd1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Skiplist.h" #include <thrust/sort.h> #include <assert.h> Node *test_Init(Node *sl, Node *n_arr, int N, int gridsize, int blocksize) { hipLaunchKernelGGL(( Init), dim3(gridsize), dim3(blocksize), 0, 0, sl, n_arr, N); return sl; } Node *test_Connect(Node *sl, int N, int girdsize, int blocksize) { hipLaunchKernelGGL(( Connect), dim3(girdsize), dim3(blocksize), 0, 0, sl, N); return sl; } void shuffle(int *a, int n) { int i, j, tmp, T = 1000; while (T--) { i = rand() % n; j = rand() % n; tmp = a[i]; a[i] = a[j]; a[j] = tmp; } } struct timespec diff(timespec start, timespec end) { struct timespec temp; if ((end.tv_nsec - start.tv_nsec) < 0) { temp.tv_sec = end.tv_sec - start.tv_sec - 1; temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec; } else { temp.tv_sec = end.tv_sec - start.tv_sec; temp.tv_nsec = end.tv_nsec - start.tv_nsec; } return temp; } struct timespec add(timespec start, timespec end) { struct timespec temp; if ((end.tv_nsec + start.tv_nsec) < 0) { temp.tv_sec = end.tv_sec + start.tv_sec + 1; temp.tv_nsec = end.tv_nsec + start.tv_nsec - 1000000000; } else { temp.tv_sec = end.tv_sec + start.tv_sec; temp.tv_nsec = end.tv_nsec + start.tv_nsec; } return temp; } int main(int argc, char *argv[]) { if (argc < 3) { printf("error:Need more argument\n"); return 0; } int gridsize = atoi(argv[1]); int blocksize = atoi(argv[2]); int N; Node *sl; Node *d_sl; Node *n_arr; Node *d_n_arr; struct timespec time1, time2, temp; int loop; // initializtion double time_used,sum=0; N = gridsize * blocksize / MAX_LEVEL; for(loop=1;loop<=1;loop++){ int *input = (int *)malloc(N * sizeof(int)); for (int i = 0; i < N; i++) { input[i] = i; } srand(time(NULL)); shuffle(input,N); thrust::host_vector<int> h_s(N); //give number to host_vector for(int i=0;i<N;i++){ h_s[i]=input[i]; } thrust::device_vector<int> d_s(h_s); clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time1); thrust::sort(d_s.begin(), d_s.end()); // sorting clock_gettime(CLOCK_PROCESS_CPUTIME_ID,&time2); temp = diff(time1, time2); time_used = 1000 * (temp.tv_sec + (double)temp.tv_nsec / 1000000000.0); printf("Sorting time= %f\n", time_used); //copy device to host h_s=d_s; //check for(int i=0;i<N;i++) assert(h_s[i]==i); sl = (Node *)malloc(N * MAX_LEVEL * sizeof(Node)); n_arr = (Node *)malloc(N * sizeof(Node)); hipMalloc(&d_sl, N * MAX_LEVEL * sizeof(Node)); hipMalloc(&d_n_arr, N * sizeof(Node)); for (int i = 0; i < MAX_LEVEL * N; i++) { sl[i].key = -1; sl[i].level = 0; sl[i].nextIdx = -1; } srand(time(NULL)); for (int i = 0; i < N; i++) { n_arr[i].key = h_s[i]; n_arr[i].level = rand() % MAX_LEVEL + 1; } clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time1); // timespec start hipMemcpy(d_sl, sl, N * MAX_LEVEL * sizeof(Node), hipMemcpyHostToDevice); hipMemcpy(d_n_arr, n_arr, N * sizeof(Node), hipMemcpyHostToDevice); test_Init(d_sl, d_n_arr, N, gridsize, blocksize); test_Connect(d_sl, N, gridsize, blocksize); hipMemcpy(sl, d_sl, N * MAX_LEVEL * sizeof(Node), hipMemcpyDeviceToHost); clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time2); // timespec stop /*printf("Skiplist node value:\n"); for(int i=0 ; i<MAX_LEVEL*N ;i++){ printf("%2d ",sl[i].key); if(i%N==N-1) printf("\n"); } printf("Skiplist nextIdx:\n"); for(int i=0 ;i<MAX_LEVEL*N;i++){ printf("%2d ",sl[i].nextIdx%N); if(i%N==N-1) printf("\n"); }*/ temp = diff(time1, time2); time_used = 1000 * (temp.tv_sec + (double)temp.tv_nsec / 1000000000.0); sum+=time_used; free(input); free(sl); free(n_arr); hipFree(d_sl); 
hipFree(d_n_arr); } printf("%d\t%f\n", N, sum/loop); }
4432c053193fd2e6229c869310ae1ee45398edd1.cu
#include "Skiplist.h" #include <thrust/sort.h> #include <assert.h> Node *test_Init(Node *sl, Node *n_arr, int N, int gridsize, int blocksize) { Init<<<gridsize, blocksize>>>(sl, n_arr, N); return sl; } Node *test_Connect(Node *sl, int N, int girdsize, int blocksize) { Connect<<<girdsize, blocksize>>>(sl, N); return sl; } void shuffle(int *a, int n) { int i, j, tmp, T = 1000; while (T--) { i = rand() % n; j = rand() % n; tmp = a[i]; a[i] = a[j]; a[j] = tmp; } } struct timespec diff(timespec start, timespec end) { struct timespec temp; if ((end.tv_nsec - start.tv_nsec) < 0) { temp.tv_sec = end.tv_sec - start.tv_sec - 1; temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec; } else { temp.tv_sec = end.tv_sec - start.tv_sec; temp.tv_nsec = end.tv_nsec - start.tv_nsec; } return temp; } struct timespec add(timespec start, timespec end) { struct timespec temp; if ((end.tv_nsec + start.tv_nsec) < 0) { temp.tv_sec = end.tv_sec + start.tv_sec + 1; temp.tv_nsec = end.tv_nsec + start.tv_nsec - 1000000000; } else { temp.tv_sec = end.tv_sec + start.tv_sec; temp.tv_nsec = end.tv_nsec + start.tv_nsec; } return temp; } int main(int argc, char *argv[]) { if (argc < 3) { printf("error:Need more argument\n"); return 0; } int gridsize = atoi(argv[1]); int blocksize = atoi(argv[2]); int N; Node *sl; Node *d_sl; Node *n_arr; Node *d_n_arr; struct timespec time1, time2, temp; int loop; // initializtion double time_used,sum=0; N = gridsize * blocksize / MAX_LEVEL; for(loop=1;loop<=1;loop++){ int *input = (int *)malloc(N * sizeof(int)); for (int i = 0; i < N; i++) { input[i] = i; } srand(time(NULL)); shuffle(input,N); thrust::host_vector<int> h_s(N); //give number to host_vector for(int i=0;i<N;i++){ h_s[i]=input[i]; } thrust::device_vector<int> d_s(h_s); clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time1); thrust::sort(d_s.begin(), d_s.end()); // sorting clock_gettime(CLOCK_PROCESS_CPUTIME_ID,&time2); temp = diff(time1, time2); time_used = 1000 * (temp.tv_sec + (double)temp.tv_nsec / 1000000000.0); printf("Sorting time= %f\n", time_used); //copy device to host h_s=d_s; //check for(int i=0;i<N;i++) assert(h_s[i]==i); sl = (Node *)malloc(N * MAX_LEVEL * sizeof(Node)); n_arr = (Node *)malloc(N * sizeof(Node)); cudaMalloc(&d_sl, N * MAX_LEVEL * sizeof(Node)); cudaMalloc(&d_n_arr, N * sizeof(Node)); for (int i = 0; i < MAX_LEVEL * N; i++) { sl[i].key = -1; sl[i].level = 0; sl[i].nextIdx = -1; } srand(time(NULL)); for (int i = 0; i < N; i++) { n_arr[i].key = h_s[i]; n_arr[i].level = rand() % MAX_LEVEL + 1; } clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time1); // timespec start cudaMemcpy(d_sl, sl, N * MAX_LEVEL * sizeof(Node), cudaMemcpyHostToDevice); cudaMemcpy(d_n_arr, n_arr, N * sizeof(Node), cudaMemcpyHostToDevice); test_Init(d_sl, d_n_arr, N, gridsize, blocksize); test_Connect(d_sl, N, gridsize, blocksize); cudaMemcpy(sl, d_sl, N * MAX_LEVEL * sizeof(Node), cudaMemcpyDeviceToHost); clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time2); // timespec stop /*printf("Skiplist node value:\n"); for(int i=0 ; i<MAX_LEVEL*N ;i++){ printf("%2d ",sl[i].key); if(i%N==N-1) printf("\n"); } printf("Skiplist nextIdx:\n"); for(int i=0 ;i<MAX_LEVEL*N;i++){ printf("%2d ",sl[i].nextIdx%N); if(i%N==N-1) printf("\n"); }*/ temp = diff(time1, time2); time_used = 1000 * (temp.tv_sec + (double)temp.tv_nsec / 1000000000.0); sum+=time_used; free(input); free(sl); free(n_arr); cudaFree(d_sl); cudaFree(d_n_arr); } printf("%d\t%f\n", N, sum/loop); }
055fdc4620b1caf142c6c9b77dfec934b62d16fc.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> __global__ void global_reduce_kernel(float * d_out, float * d_in) { int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; // do reduction in global mem for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { d_in[myId] += d_in[myId + s]; } __syncthreads(); // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = d_in[myId]; } } __global__ void shmem_reduce_kernel(float * d_out, const float * d_in) { // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; // load shared mem from global mem sdata[tid] = d_in[myId]; __syncthreads(); // make sure entire block is loaded! // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = sdata[0]; } } void reduce(float * d_out, float * d_intermediate, float * d_in, int size, bool usesSharedMemory) { // assumes that size is not greater than maxThreadsPerBlock^2 // and that size is a multiple of maxThreadsPerBlock const int maxThreadsPerBlock = 1024; int threads = maxThreadsPerBlock; int blocks = size / maxThreadsPerBlock; if (usesSharedMemory) { hipLaunchKernelGGL(( shmem_reduce_kernel), dim3(blocks), dim3(threads), threads * sizeof(float), 0, d_intermediate, d_in); } else { hipLaunchKernelGGL(( global_reduce_kernel), dim3(blocks), dim3(threads), 0, 0, d_intermediate, d_in); } // now we're down to one block left, so reduce it threads = blocks; // launch one thread for each block in prev step blocks = 1; if (usesSharedMemory) { hipLaunchKernelGGL(( shmem_reduce_kernel), dim3(blocks), dim3(threads), threads * sizeof(float), 0, d_out, d_intermediate); } else { hipLaunchKernelGGL(( global_reduce_kernel), dim3(blocks), dim3(threads), 0, 0, d_out, d_intermediate); } } int main(int argc, char **argv) { int deviceCount; hipGetDeviceCount(&deviceCount); printf("deviceCount:%d\n", deviceCount); if (deviceCount == 0) { fprintf(stderr, "error: no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } int dev = 0; hipSetDevice(dev); hipDeviceProp_t devProps; if (hipGetDeviceProperties(&devProps, dev) == 0) { printf("Using device %d:\n", dev); printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n", devProps.name, (int)devProps.totalGlobalMem, (int)devProps.major, (int)devProps.minor, (int)devProps.clockRate); } const int ARRAY_SIZE = 1 << 10; // //printf("%d", ARRAY_SIZE); const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; float sum = 0.0f; for(int i = 0; i < ARRAY_SIZE; i++) { // generate random float in [-1.0f, 1.0f] h_in[i] = -1.0f + (float)rand()/((float)RAND_MAX/2.0f); sum += h_in[i]; } // declare GPU memory pointers float * d_in, * d_intermediate, * d_out; // allocate GPU memory hipMalloc((void **) &d_in, ARRAY_BYTES); hipMalloc((void **) &d_intermediate, ARRAY_BYTES); // overallocated hipMalloc((void **) &d_out, sizeof(float)); // transfer the input array to the GPU hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); int whichKernel = 0; if (argc == 2) { whichKernel = atoi(argv[1]); } 
hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // launch the kernel switch(whichKernel) { case 0: printf("Running global reduce\n"); hipEventRecord(start, 0); for (int i = 0; i < 100; i++) { reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, false); } hipEventRecord(stop, 0); break; case 1: printf("Running reduce with shared mem\n"); hipEventRecord(start, 0); for (int i = 0; i < 100; i++) { reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, true); } hipEventRecord(stop, 0); break; default: fprintf(stderr, "error: ran no kernel\n"); exit(EXIT_FAILURE); } hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); elapsedTime /= 100.0f; // 100 trials // copy back the sum from GPU float h_out; hipMemcpy(&h_out, d_out, sizeof(float), hipMemcpyDeviceToHost); printf("average time elapsed: %f\n", elapsedTime); // free GPU memory allocation hipFree(d_in); hipFree(d_intermediate); hipFree(d_out); return 0; }
055fdc4620b1caf142c6c9b77dfec934b62d16fc.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

__global__ void global_reduce_kernel(float * d_out, float * d_in)
{
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int tid  = threadIdx.x;

    // do reduction in global mem
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            d_in[myId] += d_in[myId + s];
        }
        __syncthreads();        // make sure all adds at one stage are done!
    }

    // only thread 0 writes result for this block back to global mem
    if (tid == 0)
    {
        d_out[blockIdx.x] = d_in[myId];
    }
}

__global__ void shmem_reduce_kernel(float * d_out, const float * d_in)
{
    // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
    extern __shared__ float sdata[];

    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int tid  = threadIdx.x;

    // load shared mem from global mem
    sdata[tid] = d_in[myId];
    __syncthreads();            // make sure entire block is loaded!

    // do reduction in shared mem
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();        // make sure all adds at one stage are done!
    }

    // only thread 0 writes result for this block back to global mem
    if (tid == 0)
    {
        d_out[blockIdx.x] = sdata[0];
    }
}

void reduce(float * d_out, float * d_intermediate, float * d_in,
            int size, bool usesSharedMemory)
{
    // assumes that size is not greater than maxThreadsPerBlock^2
    // and that size is a multiple of maxThreadsPerBlock
    const int maxThreadsPerBlock = 1024;
    int threads = maxThreadsPerBlock;
    int blocks = size / maxThreadsPerBlock;
    if (usesSharedMemory)
    {
        shmem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>>
            (d_intermediate, d_in);
    }
    else
    {
        global_reduce_kernel<<<blocks, threads>>>
            (d_intermediate, d_in);
    }

    // now we're down to one block left, so reduce it
    threads = blocks;   // launch one thread for each block in prev step
    blocks = 1;
    if (usesSharedMemory)
    {
        shmem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>>
            (d_out, d_intermediate);
    }
    else
    {
        global_reduce_kernel<<<blocks, threads>>>
            (d_out, d_intermediate);
    }
}

int main(int argc, char **argv)
{
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    printf("deviceCount:%d\n", deviceCount);
    if (deviceCount == 0) {
        fprintf(stderr, "error: no devices supporting CUDA.\n");
        exit(EXIT_FAILURE);
    }
    int dev = 0;
    cudaSetDevice(dev);

    cudaDeviceProp devProps;
    if (cudaGetDeviceProperties(&devProps, dev) == 0)
    {
        printf("Using device %d:\n", dev);
        printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
               devProps.name, (int)devProps.totalGlobalMem,
               (int)devProps.major, (int)devProps.minor, (int)devProps.clockRate);
    }

    const int ARRAY_SIZE = 1 << 10;     // left-shift operator
    //printf("%d", ARRAY_SIZE);
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // generate the input array on the host
    float h_in[ARRAY_SIZE];
    float sum = 0.0f;
    for(int i = 0; i < ARRAY_SIZE; i++) {
        // generate random float in [-1.0f, 1.0f]
        h_in[i] = -1.0f + (float)rand()/((float)RAND_MAX/2.0f);
        sum += h_in[i];
    }

    // declare GPU memory pointers
    float * d_in, * d_intermediate, * d_out;

    // allocate GPU memory
    cudaMalloc((void **) &d_in, ARRAY_BYTES);
    cudaMalloc((void **) &d_intermediate, ARRAY_BYTES); // overallocated
    cudaMalloc((void **) &d_out, sizeof(float));

    // transfer the input array to the GPU
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);

    int whichKernel = 0;
    if (argc == 2) {
        whichKernel = atoi(argv[1]);
    }

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // launch the kernel
    switch(whichKernel) {
    case 0:
        printf("Running global reduce\n");
        cudaEventRecord(start, 0);
        for (int i = 0; i < 100; i++)
        {
            reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, false);
        }
        cudaEventRecord(stop, 0);
        break;
    case 1:
        printf("Running reduce with shared mem\n");
        cudaEventRecord(start, 0);
        for (int i = 0; i < 100; i++)
        {
            reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, true);
        }
        cudaEventRecord(stop, 0);
        break;
    default:
        fprintf(stderr, "error: ran no kernel\n");
        exit(EXIT_FAILURE);
    }
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    elapsedTime /= 100.0f;      // 100 trials

    // copy back the sum from GPU
    float h_out;
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);

    printf("average time elapsed: %f\n", elapsedTime);

    // free GPU memory allocation
    cudaFree(d_in);
    cudaFree(d_intermediate);
    cudaFree(d_out);

    return 0;
}
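The reduce benchmark above accumulates a host-side reference sum but never compares it with h_out; since the GPU adds the floats in a different order, bit-exact equality would be too strict anyway, so a hedged follow-up is a small relative-error check along these lines (both values below are placeholders, not measured numbers).

#include <cstdio>
#include <cmath>

static bool close_enough(float gpu, float cpu, float rel_tol = 1e-4f)
{
    float denom = std::fabs(cpu) > 1.0f ? std::fabs(cpu) : 1.0f;
    return std::fabs(gpu - cpu) / denom <= rel_tol;
}

int main()
{
    float cpu_sum = 12.345f;    // stand-in for the host accumulation sum
    float gpu_sum = 12.3451f;   // stand-in for h_out copied back from the device
    std::printf("match: %s\n", close_enough(gpu_sum, cpu_sum) ? "yes" : "no");
    return 0;
}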
4ef4297b3eca177a66375780e62f161f2100dc2a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <ctype.h> #include <string.h> #include <iostream> #include <queue> #include <vector> #include <assert.h> #include <fstream> #include <hip/hip_runtime.h> #include <algorithm> #include <nvgraph.h> #include "imagem.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #define MAX(y,x) (y>x?y:x) // Calcula valor maximo #define MIN(y,x) (y<x?y:x) // Calcula valor minimo //scp -i supercomp-final.pem -r ./segmentacao [email protected]:~/toy //scp -i supercomp-final.pem ./segmentacao/main_cuda.cu [email protected]:~/toy/segmentacao //scp -i supercomp-final.pem [email protected]:~/toy/segmentacao/saida.pgm ./segmentacao //ssh -i supercomp-final.pem [email protected] //nvcc -std=c++11 imagem.cpp main_cuda.cu -o segmentacao_cuda -lnvgraph // FILTRO DE BORDAS __global__ void edgeFilter(unsigned char *in, unsigned char *out, int rowEnd, int colEnd) { int i=blockIdx.x*blockDim.x+threadIdx.x; int j=blockIdx.y*blockDim.y+threadIdx.y; int rowStart = 0, colStart = 0; int di,dj; if(i < rowEnd && j < colEnd){ int min = 256; int max = 0; for(di = MAX(rowStart, i - 1); di <= MIN(i + 1, rowEnd - 1); di++) { for(dj = MAX(colStart, j - 1); dj <= MIN(j + 1, colEnd - 1); dj++) { if(min>in[di*(colEnd-colStart)+dj]) min = in[di*(colEnd-colStart)+dj]; if(max<in[di*(colEnd-colStart)+dj]) max = in[di*(colEnd-colStart)+dj]; } } out[i*(colEnd-colStart)+j] = max-min; } } // checagem de erros do nvgraph void check_status(nvgraphStatus_t status) { if ((int)status != 0) { printf("ERROR : %d\n",status); exit(0); } } // funco SSSP do nvgraph int SSSP(int size, int edges, std::vector<float> weights, std::vector<int> source, std::vector<int> dest_offset, int source_vert, std::vector<float> &out) { const size_t n = size; const size_t nnz = edges; float * sssp_1_h; int * source_indices_h = (int*) malloc(source.size()*sizeof(int)); int * destination_offsets_h = (int*) malloc(dest_offset.size()*sizeof(int)); float * weights_h = (float*)malloc(edges*sizeof(float)); // converso dos vetor do graph for (int i = 0; i < source.size(); i++){ source_indices_h[i] = source[i]; } for (int i = 0; i < weights.size(); i++){ weights_h[i] = weights[i]; } for (int i = 0; i < dest_offset.size(); i++){ destination_offsets_h[i] = dest_offset[i]; } const size_t vertex_numsets = 1, edge_numsets = 1; void** vertex_dim; // variaveis nvgraph nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphGraphDescr_t graph; nvgraphCSCTopology32I_t CSC_input; hipDataType edge_dimT = HIP_R_32F; hipDataType* vertex_dimT; //dados de saida sssp_1_h = (float*)malloc(size*sizeof(float)); vertex_dim = (void**)malloc(vertex_numsets*sizeof(void*)); vertex_dimT = (hipDataType*)malloc(vertex_numsets*sizeof(hipDataType)); CSC_input = (nvgraphCSCTopology32I_t) malloc(sizeof(struct nvgraphCSCTopology32I_st)); vertex_dim[0]= (void*)sssp_1_h; //vertex_dim[1]; vertex_dimT[0] = HIP_R_32F; //vertex_dimT[1]= HIP_R_32F; check_status(nvgraphCreate(&handle)); check_status(nvgraphCreateGraphDescr (handle, &graph)); //parametros da montagem do grafo CSC_input->nvertices = n; CSC_input->nedges = nnz; CSC_input->destination_offsets = destination_offsets_h; CSC_input->source_indices = source_indices_h; // montagem do grafo check_status(nvgraphSetGraphStructure(handle, graph, (void*)CSC_input, NVGRAPH_CSC_32)); check_status(nvgraphAllocateVertexData(handle, graph, vertex_numsets, vertex_dimT)); check_status(nvgraphAllocateEdgeData (handle, graph, edge_numsets, &edge_dimT)); 
check_status(nvgraphSetEdgeData(handle, graph, (void*)weights_h, 0)); //sssp check_status(nvgraphSssp(handle, graph, 0, &source_vert, 0)); //pegar dados de saida check_status(nvgraphGetVertexData(handle, graph, (void*)sssp_1_h, 0)); for(int i = 0; i < CSC_input->nvertices;i++){ out[i] = sssp_1_h[i]; } //desalocando dados auxiliares free(destination_offsets_h); free(source_indices_h); free(weights_h); free(sssp_1_h); free(vertex_dim); free(vertex_dimT); free(CSC_input); //destroi o grafo check_status(nvgraphDestroyGraphDescr (handle, graph)); check_status(nvgraphDestroy (handle)); return 0; } // GERAO DOS VETORES void vectorsGen(imagem *img, std::vector<int> &seeds, std::vector<int> &source, std::vector<int> &dest_offset, std::vector<float> &weights,int ghost){ // inicia com um zero dest_offset.push_back(0); for(int pixel = 0; pixel < img->total_size ; pixel++){ int offset = dest_offset[pixel]; int pixel_row = pixel/img->rows; int pixel_col = pixel - pixel_row*img->rows; // tratamento da ghost seed if (find(begin(seeds), end(seeds), pixel) != end(seeds)) { source.push_back(ghost); weights.push_back(0.0); offset++; } // pixel de cima int acima = pixel - img->cols; if (pixel_row > 0) { offset++; source.push_back(acima); double custo = get_edge( img, pixel , acima); weights.push_back(custo); } // pixel de baixo int abaixo = pixel + img->cols; if (pixel_row < img->rows - 1) { offset++; source.push_back(abaixo); double custo = get_edge( img, pixel ,abaixo); weights.push_back(custo); } // pixel da direita int direita = pixel + 1; if (pixel_col < img->cols - 1) { offset++; source.push_back(direita); double custo = get_edge( img, pixel , direita); weights.push_back(custo); } // pixel da esquerda int esquerda = pixel - 1; if (pixel_col > 0) { offset++; source.push_back(esquerda); double custo = get_edge( img, pixel , esquerda); weights.push_back(custo); } dest_offset.push_back(offset); } } int main(int argc, char **argv) { if (argc < 3) { std::cout << "Uso: segmentacao_cuda entrada.pgm saida.pgm\n"; return -1; } //caminho do input e do output std::string path(argv[1]); std::string out_path(argv[2]); std::vector<int> source_fg,source_bg, dest_offset_fg, dest_offset_bg; std::vector<float> weights_fg, weights_bg; int n_fg, n_bg, x, y; float total_time, graph_time, sssp_time, output_time; // variaveis de contagem de tempo hipEvent_t total_begin, total_end, begin, end; hipEventCreate(&total_begin); hipEventCreate(&total_end); hipEventCreate(&begin); hipEventCreate(&end); imagem *img = read_pgm(path); int nrows = img->rows; int ncols = img->cols; // numero de sementes de frente e de fundo std::cout << "\n numero de sementes de frente e de fundo:\n"; std::cin >> n_fg >> n_bg; std::vector<int> seeds_fg(n_fg), seeds_bg(n_bg); if(n_fg <= 0 || n_bg <= 0){ std::cout << "numero de sementes no pode ser menor que zero"; return -1; } std::cout << "posies das sementes de frente:\n"; for(int i = 0; i < n_fg; i++) { std::cin >> x >> y; seeds_fg[i] = y * img->cols + x; } std::cout << "posies das sementes de fundo:\n"; for(int i = 0; i < n_bg; i++) { std::cin >> x >> y; seeds_bg[i] = y * img->cols + x; } // inicio da contagem de tempo total do programa hipEventRecord(total_begin); //FILTRO DE BORDAS imagem *edge = new_image(nrows, ncols); thrust::device_vector<unsigned char> input(img->pixels, img->pixels + img->total_size ); thrust::device_vector<unsigned char> output(edge->pixels, edge->pixels + edge->total_size ); dim3 dimGrid(ceil(nrows/16.0), ceil(ncols/16.0), 1); dim3 dimBlock(16, 16, 1); hipLaunchKernelGGL(( 
edgeFilter), dim3(dimGrid),dim3(dimBlock), 0, 0, thrust::raw_pointer_cast(input.data()), thrust::raw_pointer_cast(output.data()), nrows, ncols); thrust::host_vector<unsigned char> output_data(output); for(int i = 0; i != output_data.size(); i++) { edge->pixels[i] = output_data[i]; } write_pgm(edge, "edge.pgm"); int fg_ghost = img->total_size+1; int bg_ghost = img->total_size; // começo da contagem de tempo da geração dos vetores hipEventRecord(begin); //geração dos vetores vectorsGen(edge, seeds_bg, source_bg, dest_offset_bg, weights_bg, bg_ghost); vectorsGen(edge, seeds_fg, source_fg, dest_offset_fg, weights_fg, fg_ghost); //fim da contagem hipEventRecord(end); hipEventSynchronize(end); hipEventElapsedTime(&graph_time, begin, end); //SSSP imagem *saida = new_image(nrows, ncols); //numero de arestas int edges_fg = 2*((ncols-1)*nrows+(nrows-1)*ncols) + n_fg; int edges_bg = 2*((ncols-1)*nrows+(nrows-1)*ncols) + n_bg; std::vector<float> out_fg (img->total_size); std::vector<float> out_bg (img->total_size); //inicio da contagem de tempo do sssp hipEventRecord(begin); //funções sssp para semente de frente e semente de fundo SSSP(img->total_size,edges_bg,weights_bg,source_bg,dest_offset_bg, bg_ghost, out_bg); SSSP(img->total_size,edges_fg,weights_fg,source_fg,dest_offset_fg, fg_ghost, out_fg); //fim da contagem hipEventRecord(end); hipEventSynchronize(end); hipEventElapsedTime(&sssp_time, begin, end); //inicio da contagem de tempo da construção da imagem de saida hipEventRecord(begin); //img de saida for (int i = 0;i < saida->total_size; i++) { if (out_fg[i] > out_bg[i]) { saida->pixels[i] = 0; } else { saida->pixels[i] = 255; } } write_pgm(saida, out_path); //fim da contagem hipEventRecord(end); hipEventSynchronize(end); hipEventElapsedTime(&output_time, begin, end); //fim da contagem de tempo total hipEventRecord(total_end); hipEventSynchronize(total_end); hipEventElapsedTime(&total_time, total_begin, total_end); hipEventDestroy(begin); hipEventDestroy(end); hipEventDestroy(total_begin); hipEventDestroy(total_end); std::cout << "\n--------------------------------------------\n"; std::cout << "--------------------TIME--------------------\n"; std::cout << "--------------------------------------------\n\n"; std::cout << "graph_time: " << graph_time << "\n"; std::cout << "sssp_time: " << sssp_time << "\n"; std::cout << "output_time: " << output_time << "\n"; std::cout << "total_time: " << total_time << "\n"; return 0; }
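The vectorsGen routine above emits the CSC arrays that nvgraph's SSSP consumes: for every destination vertex v, its incoming edges occupy positions dest_offset[v] .. dest_offset[v+1]-1 of source_indices and weights, so dest_offset[v+1]-dest_offset[v] is the in-degree of v. A minimal host-only sketch of that layout, using a made-up 3-vertex graph rather than the image graph built above:

#include <cstdio>
#include <vector>

int main() {
  // Illustrative toy graph (not from the program above): edges 1->0, 2->0, 0->1, 0->2, unit weights.
  // CSC layout: incoming edges of vertex v sit in source_indices[dest_offset[v] .. dest_offset[v+1]-1].
  std::vector<int>   dest_offset    = {0, 2, 3, 4};
  std::vector<int>   source_indices = {1, 2, 0, 0};
  std::vector<float> weights        = {1.f, 1.f, 1.f, 1.f};

  for (int v = 0; v + 1 < (int)dest_offset.size(); v++) {
    for (int e = dest_offset[v]; e < dest_offset[v + 1]; e++) {
      printf("edge %d -> %d, weight %f\n", source_indices[e], v, weights[e]);
    }
  }
  return 0;
}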
4ef4297b3eca177a66375780e62f161f2100dc2a.cu
#include <stdlib.h> #include <stdio.h> #include <ctype.h> #include <string.h> #include <iostream> #include <queue> #include <vector> #include <assert.h> #include <fstream> #include <cuda_runtime.h> #include <algorithm> #include <nvgraph.h> #include "imagem.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #define MAX(y,x) (y>x?y:x) // Calcula valor maximo #define MIN(y,x) (y<x?y:x) // Calcula valor minimo //scp -i supercomp-final.pem -r ./segmentacao [email protected]:~/toy //scp -i supercomp-final.pem ./segmentacao/main_cuda.cu [email protected]:~/toy/segmentacao //scp -i supercomp-final.pem [email protected]:~/toy/segmentacao/saida.pgm ./segmentacao //ssh -i supercomp-final.pem [email protected] //nvcc -std=c++11 imagem.cpp main_cuda.cu -o segmentacao_cuda -lnvgraph // FILTRO DE BORDAS __global__ void edgeFilter(unsigned char *in, unsigned char *out, int rowEnd, int colEnd) { int i=blockIdx.x*blockDim.x+threadIdx.x; int j=blockIdx.y*blockDim.y+threadIdx.y; int rowStart = 0, colStart = 0; int di,dj; if(i < rowEnd && j < colEnd){ int min = 256; int max = 0; for(di = MAX(rowStart, i - 1); di <= MIN(i + 1, rowEnd - 1); di++) { for(dj = MAX(colStart, j - 1); dj <= MIN(j + 1, colEnd - 1); dj++) { if(min>in[di*(colEnd-colStart)+dj]) min = in[di*(colEnd-colStart)+dj]; if(max<in[di*(colEnd-colStart)+dj]) max = in[di*(colEnd-colStart)+dj]; } } out[i*(colEnd-colStart)+j] = max-min; } } // checagem de erros do nvgraph void check_status(nvgraphStatus_t status) { if ((int)status != 0) { printf("ERROR : %d\n",status); exit(0); } } // funcão SSSP do nvgraph int SSSP(int size, int edges, std::vector<float> weights, std::vector<int> source, std::vector<int> dest_offset, int source_vert, std::vector<float> &out) { const size_t n = size; const size_t nnz = edges; float * sssp_1_h; int * source_indices_h = (int*) malloc(source.size()*sizeof(int)); int * destination_offsets_h = (int*) malloc(dest_offset.size()*sizeof(int)); float * weights_h = (float*)malloc(edges*sizeof(float)); // conversão dos vetor do graph for (int i = 0; i < source.size(); i++){ source_indices_h[i] = source[i]; } for (int i = 0; i < weights.size(); i++){ weights_h[i] = weights[i]; } for (int i = 0; i < dest_offset.size(); i++){ destination_offsets_h[i] = dest_offset[i]; } const size_t vertex_numsets = 1, edge_numsets = 1; void** vertex_dim; // variaveis nvgraph nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphGraphDescr_t graph; nvgraphCSCTopology32I_t CSC_input; cudaDataType_t edge_dimT = CUDA_R_32F; cudaDataType_t* vertex_dimT; //dados de saida sssp_1_h = (float*)malloc(size*sizeof(float)); vertex_dim = (void**)malloc(vertex_numsets*sizeof(void*)); vertex_dimT = (cudaDataType_t*)malloc(vertex_numsets*sizeof(cudaDataType_t)); CSC_input = (nvgraphCSCTopology32I_t) malloc(sizeof(struct nvgraphCSCTopology32I_st)); vertex_dim[0]= (void*)sssp_1_h; //vertex_dim[1]; vertex_dimT[0] = CUDA_R_32F; //vertex_dimT[1]= CUDA_R_32F; check_status(nvgraphCreate(&handle)); check_status(nvgraphCreateGraphDescr (handle, &graph)); //parametros da montagem do grafo CSC_input->nvertices = n; CSC_input->nedges = nnz; CSC_input->destination_offsets = destination_offsets_h; CSC_input->source_indices = source_indices_h; // montagem do grafo check_status(nvgraphSetGraphStructure(handle, graph, (void*)CSC_input, NVGRAPH_CSC_32)); check_status(nvgraphAllocateVertexData(handle, graph, vertex_numsets, vertex_dimT)); check_status(nvgraphAllocateEdgeData (handle, graph, edge_numsets, &edge_dimT)); check_status(nvgraphSetEdgeData(handle, graph, 
(void*)weights_h, 0)); //sssp check_status(nvgraphSssp(handle, graph, 0, &source_vert, 0)); //pegar dados de saida check_status(nvgraphGetVertexData(handle, graph, (void*)sssp_1_h, 0)); for(int i = 0; i < CSC_input->nvertices;i++){ out[i] = sssp_1_h[i]; } //desalocando dados auxiliares free(destination_offsets_h); free(source_indices_h); free(weights_h); free(sssp_1_h); free(vertex_dim); free(vertex_dimT); free(CSC_input); //destroi o grafo check_status(nvgraphDestroyGraphDescr (handle, graph)); check_status(nvgraphDestroy (handle)); return 0; } // GERAÇÃO DOS VETORES void vectorsGen(imagem *img, std::vector<int> &seeds, std::vector<int> &source, std::vector<int> &dest_offset, std::vector<float> &weights,int ghost){ // inicia com um zero dest_offset.push_back(0); for(int pixel = 0; pixel < img->total_size ; pixel++){ int offset = dest_offset[pixel]; int pixel_row = pixel/img->rows; int pixel_col = pixel - pixel_row*img->rows; // tratamento da ghost seed if (find(begin(seeds), end(seeds), pixel) != end(seeds)) { source.push_back(ghost); weights.push_back(0.0); offset++; } // pixel de cima int acima = pixel - img->cols; if (pixel_row > 0) { offset++; source.push_back(acima); double custo = get_edge( img, pixel , acima); weights.push_back(custo); } // pixel de baixo int abaixo = pixel + img->cols; if (pixel_row < img->rows - 1) { offset++; source.push_back(abaixo); double custo = get_edge( img, pixel ,abaixo); weights.push_back(custo); } // pixel da direita int direita = pixel + 1; if (pixel_col < img->cols - 1) { offset++; source.push_back(direita); double custo = get_edge( img, pixel , direita); weights.push_back(custo); } // pixel da esquerda int esquerda = pixel - 1; if (pixel_col > 0) { offset++; source.push_back(esquerda); double custo = get_edge( img, pixel , esquerda); weights.push_back(custo); } dest_offset.push_back(offset); } } int main(int argc, char **argv) { if (argc < 3) { std::cout << "Uso: segmentacao_cuda entrada.pgm saida.pgm\n"; return -1; } //caminho do input e do output std::string path(argv[1]); std::string out_path(argv[2]); std::vector<int> source_fg,source_bg, dest_offset_fg, dest_offset_bg; std::vector<float> weights_fg, weights_bg; int n_fg, n_bg, x, y; float total_time, graph_time, sssp_time, output_time; // variaveis de contagem de tempo cudaEvent_t total_begin, total_end, begin, end; cudaEventCreate(&total_begin); cudaEventCreate(&total_end); cudaEventCreate(&begin); cudaEventCreate(&end); imagem *img = read_pgm(path); int nrows = img->rows; int ncols = img->cols; // numero de sementes de frente e de fundo std::cout << "\n numero de sementes de frente e de fundo:\n"; std::cin >> n_fg >> n_bg; std::vector<int> seeds_fg(n_fg), seeds_bg(n_bg); if(n_fg <= 0 || n_bg <= 0){ std::cout << "numero de sementes não pode ser menor que zero"; return -1; } std::cout << "posições das sementes de frente:\n"; for(int i = 0; i < n_fg; i++) { std::cin >> x >> y; seeds_fg[i] = y * img->cols + x; } std::cout << "posições das sementes de fundo:\n"; for(int i = 0; i < n_bg; i++) { std::cin >> x >> y; seeds_bg[i] = y * img->cols + x; } // inicio da contagem de tempo total do programa cudaEventRecord(total_begin); //FILTRO DE BORDAS imagem *edge = new_image(nrows, ncols); thrust::device_vector<unsigned char> input(img->pixels, img->pixels + img->total_size ); thrust::device_vector<unsigned char> output(edge->pixels, edge->pixels + edge->total_size ); dim3 dimGrid(ceil(nrows/16.0), ceil(ncols/16.0), 1); dim3 dimBlock(16, 16, 1); 
edgeFilter<<<dimGrid,dimBlock>>>(thrust::raw_pointer_cast(input.data()), thrust::raw_pointer_cast(output.data()), nrows, ncols); thrust::host_vector<unsigned char> output_data(output); for(int i = 0; i != output_data.size(); i++) { edge->pixels[i] = output_data[i]; } write_pgm(edge, "edge.pgm"); int fg_ghost = img->total_size+1; int bg_ghost = img->total_size; // começo da contagem de tempo da geração dos vetores cudaEventRecord(begin); //geração dos vetores vectorsGen(edge, seeds_bg, source_bg, dest_offset_bg, weights_bg, bg_ghost); vectorsGen(edge, seeds_fg, source_fg, dest_offset_fg, weights_fg, fg_ghost); //fim da contagem cudaEventRecord(end); cudaEventSynchronize(end); cudaEventElapsedTime(&graph_time, begin, end); //SSSP imagem *saida = new_image(nrows, ncols); //numero de arestas int edges_fg = 2*((ncols-1)*nrows+(nrows-1)*ncols) + n_fg; int edges_bg = 2*((ncols-1)*nrows+(nrows-1)*ncols) + n_bg; std::vector<float> out_fg (img->total_size); std::vector<float> out_bg (img->total_size); //inicio da contagem de tempo do sssp cudaEventRecord(begin); //funções sssp para semente de frente e semente de fundo SSSP(img->total_size,edges_bg,weights_bg,source_bg,dest_offset_bg, bg_ghost, out_bg); SSSP(img->total_size,edges_fg,weights_fg,source_fg,dest_offset_fg, fg_ghost, out_fg); //fim da contagem cudaEventRecord(end); cudaEventSynchronize(end); cudaEventElapsedTime(&sssp_time, begin, end); //inicio da contagem de tempo da construção da imagem de saida cudaEventRecord(begin); //img de saida for (int i = 0;i < saida->total_size; i++) { if (out_fg[i] > out_bg[i]) { saida->pixels[i] = 0; } else { saida->pixels[i] = 255; } } write_pgm(saida, out_path); //fim da contagem cudaEventRecord(end); cudaEventSynchronize(end); cudaEventElapsedTime(&output_time, begin, end); //fim da contagem de tempo total cudaEventRecord(total_end); cudaEventSynchronize(total_end); cudaEventElapsedTime(&total_time, total_begin, total_end); cudaEventDestroy(begin); cudaEventDestroy(end); cudaEventDestroy(total_begin); cudaEventDestroy(total_end); std::cout << "\n--------------------------------------------\n"; std::cout << "--------------------TIME--------------------\n"; std::cout << "--------------------------------------------\n\n"; std::cout << "graph_time: " << graph_time << "\n"; std::cout << "sssp_time: " << sssp_time << "\n"; std::cout << "output_time: " << output_time << "\n"; std::cout << "total_time: " << total_time << "\n"; return 0; }
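The edgeFilter launch above uses 16x16 blocks on a grid of ceil(nrows/16) by ceil(ncols/16) blocks and relies on the in-kernel bounds check to discard the overhang of the last tiles. A standalone sketch of that tiling pattern; the kernel name and the array sizes here are illustrative, not taken from the program:

#include <cstdio>
#include <cuda_runtime.h>

// Illustrative sketch of the 2-D tile indexing used by edgeFilter above.
__global__ void fillIndex(int *out, int rows, int cols) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;   // row
  int j = blockIdx.y * blockDim.y + threadIdx.y;   // column
  if (i < rows && j < cols)                        // discard the tile overhang
    out[i * cols + j] = i * cols + j;
}

int main() {
  const int rows = 37, cols = 53;                  // deliberately not multiples of 16
  int *d_out = nullptr;
  cudaMalloc((void **)&d_out, rows * cols * sizeof(int));
  dim3 block(16, 16, 1);
  dim3 grid((rows + block.x - 1) / block.x, (cols + block.y - 1) / block.y, 1);
  fillIndex<<<grid, block>>>(d_out, rows, cols);
  cudaDeviceSynchronize();
  int last = -1;
  cudaMemcpy(&last, d_out + rows * cols - 1, sizeof(int), cudaMemcpyDeviceToHost);
  printf("last element = %d (expected %d)\n", last, rows * cols - 1);
  cudaFree(d_out);
  return 0;
}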
da7db55bbce4d0a21f2fe1bbd3c695a743f4a685.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_bndp.h" #include "cuda_mparticles.h" #include "cuda_bits.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include "psc_bits.h" #include <mrc_profile.h> #define THREADS_PER_BLOCK 256 // layout of the spine // lt self rb # from left-top .. self .. right-bottom // 0 1 2 3 4 5 6 7 8 NEW // b0 | | | | | | | | | | | // b1 | | | | | | | | | | | // b2 | | | | | | | | | | | // ... // bn | | | | | | | | | | | // | | | | | | | | | | | | | ... | | # oob // b0 b1 b2 b3 bn #include <cstdio> #include <cassert> // ---------------------------------------------------------------------- // ctor template<typename CudaMparticles, typename DIM> cuda_bndp<CudaMparticles, DIM>::cuda_bndp(const Grid_t& grid) : cuda_mparticles_indexer<BS>(grid) { d_spine_cnts.resize(1 + n_blocks * (CUDA_BND_STRIDE + 1)); d_spine_sums.resize(1 + n_blocks * (CUDA_BND_STRIDE + 1)); bpatch.resize(n_patches); bufs_.reserve(n_patches); for (int p = 0; p < n_patches; p++) { bufs_.push_back(&bpatch[p].buf); } } // ---------------------------------------------------------------------- // prep template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::prep(CudaMparticles* cmprts) { static int pr_A, pr_B, pr_D, pr_B0, pr_B1; if (!pr_A) { pr_A = prof_register("xchg_bidx", 1., 0, 0); pr_B0= prof_register("xchg_reduce", 1., 0, 0); pr_B1= prof_register("xchg_n_send", 1., 0, 0); pr_B = prof_register("xchg_scan_send", 1., 0, 0); pr_D = prof_register("xchg_from_dev", 1., 0, 0); } //prof_start(pr_A); //cuda_mprts_find_block_keys(mprts); //prof_stop(pr_A); prof_start(pr_B0); spine_reduce(cmprts); prof_stop(pr_B0); prof_start(pr_B1); n_prts_send = find_n_send(cmprts); prof_stop(pr_B1); prof_start(pr_B); scan_send_buf_total(cmprts, n_prts_send); prof_stop(pr_B); prof_start(pr_D); copy_from_dev_and_convert(cmprts, n_prts_send); prof_stop(pr_D); } // ---------------------------------------------------------------------- // post template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::post(CudaMparticles* cmprts) { static int pr_A, pr_D, pr_E, pr_D1; if (!pr_A) { pr_A = prof_register("xchg_to_dev", 1., 0, 0); pr_D = prof_register("xchg_sort", 1., 0, 0); pr_D1= prof_register("xchg_upd_off", 1., 0, 0); pr_E = prof_register("xchg_reorder", 1., 0, 0); } prof_start(pr_A); uint n_prts_recv = convert_and_copy_to_dev(cmprts); cmprts->n_prts += n_prts_recv; prof_stop(pr_A); prof_start(pr_D); sort_pairs_device(cmprts, n_prts_recv); cmprts->n_prts -= n_prts_send; prof_stop(pr_D); prof_start(pr_D1); update_offsets(cmprts); prof_stop(pr_D1); prof_start(pr_E); #if 0 cmprts->reorder(cmprts); assert(cmprts->check_ordered()); #else cmprts->need_reorder = true; #endif prof_stop(pr_E); } // ---------------------------------------------------------------------- // find_n_send template<typename CudaMparticles, typename DIM> uint cuda_bndp<CudaMparticles, DIM>::find_n_send(CudaMparticles *cmprts) { thrust::host_vector<uint> h_spine_sums(n_blocks + 1); thrust::copy(d_spine_sums.data() + n_blocks * 10, d_spine_sums.data() + n_blocks * 11 + 1, h_spine_sums.begin()); uint off = 0; for (int p = 0; p < n_patches; p++) { uint n_send = h_spine_sums[(p + 1) * n_blocks_per_patch]; bpatch[p].n_send = n_send - off; off = n_send; } return off; } // ---------------------------------------------------------------------- // copy_from_dev_and_convert template<typename CudaMparticles, typename 
DIM> void cuda_bndp<CudaMparticles, DIM>::copy_from_dev_and_convert(CudaMparticles *cmprts, uint n_prts_send) { uint n_prts = cmprts->n_prts; thrust::host_vector<float4> h_bnd_xi4(n_prts_send); thrust::host_vector<float4> h_bnd_pxi4(n_prts_send); assert(cmprts->d_xi4.begin() + n_prts + n_prts_send == cmprts->d_xi4.end()); thrust::copy(cmprts->d_xi4.begin() + n_prts, cmprts->d_xi4.end(), h_bnd_xi4.begin()); thrust::copy(cmprts->d_pxi4.begin() + n_prts, cmprts->d_pxi4.end(), h_bnd_pxi4.begin()); uint off = 0; for (int p = 0; p < n_patches; p++) { auto& buf = bpatch[p].buf; uint n_send = bpatch[p].n_send; buf.reserve(n_send); buf.resize(n_send); for (int n = 0; n < n_send; n++) { int kind = cuda_float_as_int(h_bnd_xi4[n + off].w); buf[n] = particle_cuda_t{{h_bnd_xi4[n + off].x, h_bnd_xi4[n + off].y, h_bnd_xi4[n + off].z}, {h_bnd_pxi4[n + off].x, h_bnd_pxi4[n + off].y, h_bnd_pxi4[n + off].z}, h_bnd_pxi4[n + off].w / float(cmprts->grid_.kinds[kind].q), kind}; } off += n_send; } cmprts->resize(n_prts); } // ---------------------------------------------------------------------- // convert_and_copy_to_dev template<typename CudaMparticles, typename DIM> uint cuda_bndp<CudaMparticles, DIM>::convert_and_copy_to_dev(CudaMparticles *cmprts) { uint n_recv = 0; for (int p = 0; p < n_patches; p++) { n_recv += bpatch[p].buf.size(); } thrust::host_vector<float4> h_bnd_xi4(n_recv); thrust::host_vector<float4> h_bnd_pxi4(n_recv); thrust::host_vector<uint> h_bnd_idx(n_recv); thrust::host_vector<uint> h_bnd_off(n_recv); thrust::host_vector<uint> h_bnd_cnt(n_blocks, 0); uint off = 0; for (int p = 0; p < n_patches; p++) { int n_recv = bpatch[p].buf.size(); bpatch[p].n_recv = n_recv; for (int n = 0; n < n_recv; n++) { const particle_cuda_t& prt = bpatch[p].buf[n]; h_bnd_xi4[n + off].x = prt.x[0]; h_bnd_xi4[n + off].y = prt.x[1]; h_bnd_xi4[n + off].z = prt.x[2]; h_bnd_xi4[n + off].w = cuda_int_as_float(prt.kind); h_bnd_pxi4[n + off].x = prt.p[0]; h_bnd_pxi4[n + off].y = prt.p[1]; h_bnd_pxi4[n + off].z = prt.p[2]; h_bnd_pxi4[n + off].w = prt.w * cmprts->grid_.kinds[prt.kind].q; checkInPatchMod(&h_bnd_xi4[n + off].x); uint b = blockIndex(h_bnd_xi4[n + off], p); assert(b < n_blocks); h_bnd_idx[n + off] = b; h_bnd_off[n + off] = h_bnd_cnt[b]++; } off += n_recv; } cmprts->resize(cmprts->n_prts + n_recv); thrust::copy(h_bnd_xi4.begin(), h_bnd_xi4.end(), cmprts->d_xi4.begin() + cmprts->n_prts); thrust::copy(h_bnd_pxi4.begin(), h_bnd_pxi4.end(), cmprts->d_pxi4.begin() + cmprts->n_prts); // for consistency, use same block indices that we counted earlier // OPT unneeded? 
thrust::copy(h_bnd_idx.begin(), h_bnd_idx.end(), cmprts->by_block_.d_idx.begin() + cmprts->n_prts); // slight abuse of the now unused last part of spine_cnts thrust::copy(h_bnd_cnt.begin(), h_bnd_cnt.end(), d_spine_cnts.begin() + 10 * n_blocks); d_bnd_off.resize(n_recv); thrust::copy(h_bnd_off.begin(), h_bnd_off.end(), d_bnd_off.begin()); return n_recv; } // ---------------------------------------------------------------------- // update_offsets __global__ static void mprts_update_offsets(int nr_total_blocks, uint *d_off, uint *d_spine_sums) { int bid = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; if (bid <= nr_total_blocks) { d_off[bid] = d_spine_sums[bid * CUDA_BND_STRIDE + 0]; } } template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::update_offsets(CudaMparticles *cmprts) { int dimGrid = (n_blocks + 1 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; hipLaunchKernelGGL(( mprts_update_offsets), dim3(dimGrid), dim3(THREADS_PER_BLOCK), 0, 0, n_blocks, cmprts->by_block_.d_off.data().get(), d_spine_sums.data().get()); cuda_sync_if_enabled(); } template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::update_offsets_gold(CudaMparticles *cmprts) { thrust::host_vector<uint> h_spine_sums(d_spine_sums.data(), d_spine_sums.data() + 1 + n_blocks * (10 + 1)); thrust::host_vector<uint> h_off(n_blocks + 1); for (int bid = 0; bid <= n_blocks; bid++) { h_off[bid] = h_spine_sums[bid * 10]; } thrust::copy(h_off.begin(), h_off.end(), cmprts->by_block_.d_off.begin()); } // ---------------------------------------------------------------------- // convert_and_copy_to_dev template<typename CudaMparticles> uint cuda_bndp<CudaMparticles, dim_xyz>::convert_and_copy_to_dev(CudaMparticles* cmprts) { uint n_recv = 0; for (int p = 0; p < n_patches; p++) { n_recv += bpatch[p].buf.size(); } thrust::host_vector<float4> h_bnd_xi4(n_recv); thrust::host_vector<float4> h_bnd_pxi4(n_recv); thrust::host_vector<uint> h_bnd_idx(n_recv); //thrust::host_vector<uint> h_bnd_off(n_recv); thrust::host_vector<uint> h_bnd_cnt(n_blocks, 0); uint off = 0; for (int p = 0; p < n_patches; p++) { int n_recv = bpatch[p].buf.size(); bpatch[p].n_recv = n_recv; for (int n = 0; n < n_recv; n++) { const particle_cuda_t& prt = bpatch[p].buf[n]; h_bnd_xi4[n + off].x = prt.x[0]; h_bnd_xi4[n + off].y = prt.x[1]; h_bnd_xi4[n + off].z = prt.x[2]; h_bnd_xi4[n + off].w = cuda_int_as_float(prt.kind); h_bnd_pxi4[n + off].x = prt.p[0]; h_bnd_pxi4[n + off].y = prt.p[1]; h_bnd_pxi4[n + off].z = prt.p[2]; h_bnd_pxi4[n + off].w = prt.w * cmprts->grid_.kinds[prt.kind].q; checkInPatchMod(&h_bnd_xi4[n + off].x); uint b = blockIndex(h_bnd_xi4[n + off], p); assert(b < n_blocks); h_bnd_idx[n + off] = b; //h_bnd_off[n + off] = h_bnd_cnt[b]++; } off += n_recv; } cmprts->resize(cmprts->n_prts + n_recv); thrust::copy(h_bnd_xi4.begin(), h_bnd_xi4.end(), cmprts->d_xi4.begin() + cmprts->n_prts); thrust::copy(h_bnd_pxi4.begin(), h_bnd_pxi4.end(), cmprts->d_pxi4.begin() + cmprts->n_prts); thrust::copy(h_bnd_idx.begin(), h_bnd_idx.end(), cmprts->by_block_.d_idx.begin() + cmprts->n_prts); // // slight abuse of the now unused last part of spine_cnts // thrust::copy(h_bnd_cnt.begin(), h_bnd_cnt.end(), d_spine_cnts.begin() + 10 * n_blocks); // d_bnd_off.resize(n_recv); // thrust::copy(h_bnd_off.begin(), h_bnd_off.end(), d_bnd_off.begin()); return n_recv; } template<typename CudaMparticles> void cuda_bndp<CudaMparticles, dim_xyz>::post(CudaMparticles* _cmprts) { auto& cmprts = *_cmprts; uint n_prts_recv = 
convert_and_copy_to_dev(&cmprts); cmprts.n_prts += n_prts_recv; cmprts.resize(cmprts.n_prts); auto& d_bidx = cmprts.by_block_.d_idx; thrust::sequence(cmprts.by_block_.d_id.begin(), cmprts.by_block_.d_id.end()); thrust::stable_sort_by_key(d_bidx.begin(), d_bidx.end(), cmprts.by_block_.d_id.begin()); // find offsets thrust::counting_iterator<uint> search_begin(0); thrust::upper_bound(d_bidx.begin(), d_bidx.end(), search_begin, search_begin + cmprts.n_blocks, cmprts.by_block_.d_off.begin() + 1); // d_off[0] was set to zero during d_off initialization cmprts.need_reorder = true; } template struct cuda_bndp<cuda_mparticles<BS144>, dim_yz>; template struct cuda_bndp<cuda_mparticles<BS444>, dim_xyz>;
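In the dim_xyz specialization of post() above, the per-block offset table is rebuilt from the sorted block indices with a vectorized thrust::upper_bound over a counting iterator. A small self-contained sketch of that pattern on made-up data (4 blocks, 6 particles; the values are not from the code above):

#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/binary_search.h>
#include <thrust/iterator/counting_iterator.h>

int main() {
  // Sorted block index of each particle (illustrative data).
  const unsigned int idx[] = {0, 0, 1, 3, 3, 3};
  const int n_blocks = 4, n_prts = 6;
  thrust::device_vector<unsigned int> d_bidx(idx, idx + n_prts);
  thrust::device_vector<unsigned int> d_off(n_blocks + 1, 0);   // d_off[0] stays 0

  // d_off[b+1] = one past the last particle whose block index is b.
  thrust::counting_iterator<unsigned int> search_begin(0);
  thrust::upper_bound(d_bidx.begin(), d_bidx.end(),
                      search_begin, search_begin + n_blocks,
                      d_off.begin() + 1);

  thrust::host_vector<unsigned int> h_off(d_off);
  for (int b = 0; b <= n_blocks; b++)
    printf("off[%d] = %u\n", b, h_off[b]);                      // expect 0 2 3 3 6
  return 0;
}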
da7db55bbce4d0a21f2fe1bbd3c695a743f4a685.cu
#include "cuda_bndp.h" #include "cuda_mparticles.h" #include "cuda_bits.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include "psc_bits.h" #include <mrc_profile.h> #define THREADS_PER_BLOCK 256 // layout of the spine // lt self rb # from left-top .. self .. right-bottom // 0 1 2 3 4 5 6 7 8 NEW // b0 | | | | | | | | | | | // b1 | | | | | | | | | | | // b2 | | | | | | | | | | | // ... // bn | | | | | | | | | | | // | | | | | | | | | | | | | ... | | # oob // b0 b1 b2 b3 bn #include <cstdio> #include <cassert> // ---------------------------------------------------------------------- // ctor template<typename CudaMparticles, typename DIM> cuda_bndp<CudaMparticles, DIM>::cuda_bndp(const Grid_t& grid) : cuda_mparticles_indexer<BS>(grid) { d_spine_cnts.resize(1 + n_blocks * (CUDA_BND_STRIDE + 1)); d_spine_sums.resize(1 + n_blocks * (CUDA_BND_STRIDE + 1)); bpatch.resize(n_patches); bufs_.reserve(n_patches); for (int p = 0; p < n_patches; p++) { bufs_.push_back(&bpatch[p].buf); } } // ---------------------------------------------------------------------- // prep template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::prep(CudaMparticles* cmprts) { static int pr_A, pr_B, pr_D, pr_B0, pr_B1; if (!pr_A) { pr_A = prof_register("xchg_bidx", 1., 0, 0); pr_B0= prof_register("xchg_reduce", 1., 0, 0); pr_B1= prof_register("xchg_n_send", 1., 0, 0); pr_B = prof_register("xchg_scan_send", 1., 0, 0); pr_D = prof_register("xchg_from_dev", 1., 0, 0); } //prof_start(pr_A); //cuda_mprts_find_block_keys(mprts); //prof_stop(pr_A); prof_start(pr_B0); spine_reduce(cmprts); prof_stop(pr_B0); prof_start(pr_B1); n_prts_send = find_n_send(cmprts); prof_stop(pr_B1); prof_start(pr_B); scan_send_buf_total(cmprts, n_prts_send); prof_stop(pr_B); prof_start(pr_D); copy_from_dev_and_convert(cmprts, n_prts_send); prof_stop(pr_D); } // ---------------------------------------------------------------------- // post template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::post(CudaMparticles* cmprts) { static int pr_A, pr_D, pr_E, pr_D1; if (!pr_A) { pr_A = prof_register("xchg_to_dev", 1., 0, 0); pr_D = prof_register("xchg_sort", 1., 0, 0); pr_D1= prof_register("xchg_upd_off", 1., 0, 0); pr_E = prof_register("xchg_reorder", 1., 0, 0); } prof_start(pr_A); uint n_prts_recv = convert_and_copy_to_dev(cmprts); cmprts->n_prts += n_prts_recv; prof_stop(pr_A); prof_start(pr_D); sort_pairs_device(cmprts, n_prts_recv); cmprts->n_prts -= n_prts_send; prof_stop(pr_D); prof_start(pr_D1); update_offsets(cmprts); prof_stop(pr_D1); prof_start(pr_E); #if 0 cmprts->reorder(cmprts); assert(cmprts->check_ordered()); #else cmprts->need_reorder = true; #endif prof_stop(pr_E); } // ---------------------------------------------------------------------- // find_n_send template<typename CudaMparticles, typename DIM> uint cuda_bndp<CudaMparticles, DIM>::find_n_send(CudaMparticles *cmprts) { thrust::host_vector<uint> h_spine_sums(n_blocks + 1); thrust::copy(d_spine_sums.data() + n_blocks * 10, d_spine_sums.data() + n_blocks * 11 + 1, h_spine_sums.begin()); uint off = 0; for (int p = 0; p < n_patches; p++) { uint n_send = h_spine_sums[(p + 1) * n_blocks_per_patch]; bpatch[p].n_send = n_send - off; off = n_send; } return off; } // ---------------------------------------------------------------------- // copy_from_dev_and_convert template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::copy_from_dev_and_convert(CudaMparticles 
*cmprts, uint n_prts_send) { uint n_prts = cmprts->n_prts; thrust::host_vector<float4> h_bnd_xi4(n_prts_send); thrust::host_vector<float4> h_bnd_pxi4(n_prts_send); assert(cmprts->d_xi4.begin() + n_prts + n_prts_send == cmprts->d_xi4.end()); thrust::copy(cmprts->d_xi4.begin() + n_prts, cmprts->d_xi4.end(), h_bnd_xi4.begin()); thrust::copy(cmprts->d_pxi4.begin() + n_prts, cmprts->d_pxi4.end(), h_bnd_pxi4.begin()); uint off = 0; for (int p = 0; p < n_patches; p++) { auto& buf = bpatch[p].buf; uint n_send = bpatch[p].n_send; buf.reserve(n_send); buf.resize(n_send); for (int n = 0; n < n_send; n++) { int kind = cuda_float_as_int(h_bnd_xi4[n + off].w); buf[n] = particle_cuda_t{{h_bnd_xi4[n + off].x, h_bnd_xi4[n + off].y, h_bnd_xi4[n + off].z}, {h_bnd_pxi4[n + off].x, h_bnd_pxi4[n + off].y, h_bnd_pxi4[n + off].z}, h_bnd_pxi4[n + off].w / float(cmprts->grid_.kinds[kind].q), kind}; } off += n_send; } cmprts->resize(n_prts); } // ---------------------------------------------------------------------- // convert_and_copy_to_dev template<typename CudaMparticles, typename DIM> uint cuda_bndp<CudaMparticles, DIM>::convert_and_copy_to_dev(CudaMparticles *cmprts) { uint n_recv = 0; for (int p = 0; p < n_patches; p++) { n_recv += bpatch[p].buf.size(); } thrust::host_vector<float4> h_bnd_xi4(n_recv); thrust::host_vector<float4> h_bnd_pxi4(n_recv); thrust::host_vector<uint> h_bnd_idx(n_recv); thrust::host_vector<uint> h_bnd_off(n_recv); thrust::host_vector<uint> h_bnd_cnt(n_blocks, 0); uint off = 0; for (int p = 0; p < n_patches; p++) { int n_recv = bpatch[p].buf.size(); bpatch[p].n_recv = n_recv; for (int n = 0; n < n_recv; n++) { const particle_cuda_t& prt = bpatch[p].buf[n]; h_bnd_xi4[n + off].x = prt.x[0]; h_bnd_xi4[n + off].y = prt.x[1]; h_bnd_xi4[n + off].z = prt.x[2]; h_bnd_xi4[n + off].w = cuda_int_as_float(prt.kind); h_bnd_pxi4[n + off].x = prt.p[0]; h_bnd_pxi4[n + off].y = prt.p[1]; h_bnd_pxi4[n + off].z = prt.p[2]; h_bnd_pxi4[n + off].w = prt.w * cmprts->grid_.kinds[prt.kind].q; checkInPatchMod(&h_bnd_xi4[n + off].x); uint b = blockIndex(h_bnd_xi4[n + off], p); assert(b < n_blocks); h_bnd_idx[n + off] = b; h_bnd_off[n + off] = h_bnd_cnt[b]++; } off += n_recv; } cmprts->resize(cmprts->n_prts + n_recv); thrust::copy(h_bnd_xi4.begin(), h_bnd_xi4.end(), cmprts->d_xi4.begin() + cmprts->n_prts); thrust::copy(h_bnd_pxi4.begin(), h_bnd_pxi4.end(), cmprts->d_pxi4.begin() + cmprts->n_prts); // for consistency, use same block indices that we counted earlier // OPT unneeded? 
thrust::copy(h_bnd_idx.begin(), h_bnd_idx.end(), cmprts->by_block_.d_idx.begin() + cmprts->n_prts); // slight abuse of the now unused last part of spine_cnts thrust::copy(h_bnd_cnt.begin(), h_bnd_cnt.end(), d_spine_cnts.begin() + 10 * n_blocks); d_bnd_off.resize(n_recv); thrust::copy(h_bnd_off.begin(), h_bnd_off.end(), d_bnd_off.begin()); return n_recv; } // ---------------------------------------------------------------------- // update_offsets __global__ static void mprts_update_offsets(int nr_total_blocks, uint *d_off, uint *d_spine_sums) { int bid = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; if (bid <= nr_total_blocks) { d_off[bid] = d_spine_sums[bid * CUDA_BND_STRIDE + 0]; } } template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::update_offsets(CudaMparticles *cmprts) { int dimGrid = (n_blocks + 1 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; mprts_update_offsets<<<dimGrid, THREADS_PER_BLOCK>>> (n_blocks, cmprts->by_block_.d_off.data().get(), d_spine_sums.data().get()); cuda_sync_if_enabled(); } template<typename CudaMparticles, typename DIM> void cuda_bndp<CudaMparticles, DIM>::update_offsets_gold(CudaMparticles *cmprts) { thrust::host_vector<uint> h_spine_sums(d_spine_sums.data(), d_spine_sums.data() + 1 + n_blocks * (10 + 1)); thrust::host_vector<uint> h_off(n_blocks + 1); for (int bid = 0; bid <= n_blocks; bid++) { h_off[bid] = h_spine_sums[bid * 10]; } thrust::copy(h_off.begin(), h_off.end(), cmprts->by_block_.d_off.begin()); } // ---------------------------------------------------------------------- // convert_and_copy_to_dev template<typename CudaMparticles> uint cuda_bndp<CudaMparticles, dim_xyz>::convert_and_copy_to_dev(CudaMparticles* cmprts) { uint n_recv = 0; for (int p = 0; p < n_patches; p++) { n_recv += bpatch[p].buf.size(); } thrust::host_vector<float4> h_bnd_xi4(n_recv); thrust::host_vector<float4> h_bnd_pxi4(n_recv); thrust::host_vector<uint> h_bnd_idx(n_recv); //thrust::host_vector<uint> h_bnd_off(n_recv); thrust::host_vector<uint> h_bnd_cnt(n_blocks, 0); uint off = 0; for (int p = 0; p < n_patches; p++) { int n_recv = bpatch[p].buf.size(); bpatch[p].n_recv = n_recv; for (int n = 0; n < n_recv; n++) { const particle_cuda_t& prt = bpatch[p].buf[n]; h_bnd_xi4[n + off].x = prt.x[0]; h_bnd_xi4[n + off].y = prt.x[1]; h_bnd_xi4[n + off].z = prt.x[2]; h_bnd_xi4[n + off].w = cuda_int_as_float(prt.kind); h_bnd_pxi4[n + off].x = prt.p[0]; h_bnd_pxi4[n + off].y = prt.p[1]; h_bnd_pxi4[n + off].z = prt.p[2]; h_bnd_pxi4[n + off].w = prt.w * cmprts->grid_.kinds[prt.kind].q; checkInPatchMod(&h_bnd_xi4[n + off].x); uint b = blockIndex(h_bnd_xi4[n + off], p); assert(b < n_blocks); h_bnd_idx[n + off] = b; //h_bnd_off[n + off] = h_bnd_cnt[b]++; } off += n_recv; } cmprts->resize(cmprts->n_prts + n_recv); thrust::copy(h_bnd_xi4.begin(), h_bnd_xi4.end(), cmprts->d_xi4.begin() + cmprts->n_prts); thrust::copy(h_bnd_pxi4.begin(), h_bnd_pxi4.end(), cmprts->d_pxi4.begin() + cmprts->n_prts); thrust::copy(h_bnd_idx.begin(), h_bnd_idx.end(), cmprts->by_block_.d_idx.begin() + cmprts->n_prts); // // slight abuse of the now unused last part of spine_cnts // thrust::copy(h_bnd_cnt.begin(), h_bnd_cnt.end(), d_spine_cnts.begin() + 10 * n_blocks); // d_bnd_off.resize(n_recv); // thrust::copy(h_bnd_off.begin(), h_bnd_off.end(), d_bnd_off.begin()); return n_recv; } template<typename CudaMparticles> void cuda_bndp<CudaMparticles, dim_xyz>::post(CudaMparticles* _cmprts) { auto& cmprts = *_cmprts; uint n_prts_recv = convert_and_copy_to_dev(&cmprts); cmprts.n_prts += 
n_prts_recv; cmprts.resize(cmprts.n_prts); auto& d_bidx = cmprts.by_block_.d_idx; thrust::sequence(cmprts.by_block_.d_id.begin(), cmprts.by_block_.d_id.end()); thrust::stable_sort_by_key(d_bidx.begin(), d_bidx.end(), cmprts.by_block_.d_id.begin()); // find offsets thrust::counting_iterator<uint> search_begin(0); thrust::upper_bound(d_bidx.begin(), d_bidx.end(), search_begin, search_begin + cmprts.n_blocks, cmprts.by_block_.d_off.begin() + 1); // d_off[0] was set to zero during d_off initialization cmprts.need_reorder = true; } template struct cuda_bndp<cuda_mparticles<BS144>, dim_yz>; template struct cuda_bndp<cuda_mparticles<BS444>, dim_xyz>;
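post() above never moves the particle data during the sort: d_id is filled with 0..N-1 by thrust::sequence and thrust::stable_sort_by_key then orders those ids by block index, leaving a permutation in d_id and flagging need_reorder for later. A standalone sketch of that argsort idiom with invented keys:

#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>

int main() {
  // Illustrative block indices; the keys themselves are sorted in place as well.
  const unsigned int keys[] = {2, 0, 3, 0, 1};
  const int n = 5;
  thrust::device_vector<unsigned int> d_keys(keys, keys + n);
  thrust::device_vector<unsigned int> d_id(n);
  thrust::sequence(d_id.begin(), d_id.end());            // 0 1 2 3 4

  // "stable" preserves the original order of equal keys.
  thrust::stable_sort_by_key(d_keys.begin(), d_keys.end(), d_id.begin());

  thrust::host_vector<unsigned int> h_id(d_id);
  for (int i = 0; i < n; i++)
    printf("%u ", h_id[i]);                              // expect: 1 3 4 0 2
  printf("\n");
  return 0;
}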
d1a41be86d65c161a289ef21b3d6f097e1e2762e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuco/static_map.cuh> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/logical.h> #include <thrust/sequence.h> #include <thrust/tuple.h> #include <cmath> #include <cstddef> #include <iostream> #include <limits> /** * @file device_view_example.cu * @brief Demonstrates usage of the device side APIs for individual operations like insert/find. * * Individual operations like a single insert or find can be performed in device code via the * static_map "device_view" types. Note that concurrent insert and find are not supported, and * therefore there are separate view types for insert and find to help prevent undefined behavior. * * @note This example is for demonstration purposes only. It is not intended to show the most * performant way to do the example algorithm. * */ /** * @brief Inserts keys that pass the specified predicated into the map. * * @tparam Map Type of the map returned from static_map::get_device_mutable_view * @tparam KeyIter Input iterator whose value_type convertible to Map::key_type * @tparam ValueIter Input iterator whose value_type is convertible to Map::mapped_type * @tparam Predicate Unary predicate * * @param[in] map_view View of the map into which inserts will be performed * @param[in] key_begin The beginning of the range of keys to insert * @param[in] value_begin The beginning of the range of values associated with each key to insert * @param[in] num_keys The total number of keys and values * @param[in] pred Unary predicate applied to each key. Only keys that pass the predicated will be * inserted. * @param[out] num_inserted The total number of keys successfully inserted */ template <typename Map, typename KeyIter, typename ValueIter, typename Predicate> __global__ void filtered_insert(Map map_view, KeyIter key_begin, ValueIter value_begin, std::size_t num_keys, Predicate pred, int* num_inserted) { auto tid = threadIdx.x + blockIdx.x * blockDim.x; std::size_t counter = 0; while (tid < num_keys) { // Only insert keys that pass the predicate if (pred(key_begin[tid])) { // device_mutable_view::insert returns `true` if it is the first time the given key was // inserted and `false` if the key already existed if (map_view.insert({key_begin[tid], value_begin[tid]})) { ++counter; // Count number of successfully inserted keys } } tid += gridDim.x * blockDim.x; } // Update global count of inserted keys atomicAdd(num_inserted, counter); } /** * @brief For keys that have a match in the map, increments their corresponding value by one. 
* * @tparam Map Type of the map returned from static_map::get_device_view * @tparam KeyIter Input iterator whose value_type convertible to Map::key_type * * @param map_view View of the map into which queries will be performed * @param key_begin The beginning of the range of keys to query * @param num_keys The total number of keys */ template <typename Map, typename KeyIter> __global__ void increment_values(Map map_view, KeyIter key_begin, std::size_t num_keys) { auto tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < num_keys) { // If the key exists in the map, find returns an iterator to the specified key. Otherwise it // returns map.end() auto found = map_view.find(key_begin[tid]); if (found != map_view.end()) { // If the key exists, atomically increment the associated value // The value type of the iterator is pair<cuda::atomic<Key>, cuda::atomic<Value>> found->second.fetch_add(1, cuda::memory_order_relaxed); } tid += gridDim.x * blockDim.x; } } int main(void) { using Key = int; using Value = int; // Empty slots are represented by reserved "sentinel" values. These values should be selected such // that they never occur in your input data. Key constexpr empty_key_sentinel = -1; Value constexpr empty_value_sentinel = -1; // Number of key/value pairs to be inserted std::size_t constexpr num_keys = 50'000; // Create a sequence of keys and values {{0,0}, {1,1}, ... {i,i}} thrust::device_vector<Key> insert_keys(num_keys); thrust::sequence(insert_keys.begin(), insert_keys.end(), 0); thrust::device_vector<Value> insert_values(num_keys); thrust::sequence(insert_values.begin(), insert_values.end(), 0); // Compute capacity based on a 50% load factor auto constexpr load_factor = 0.5; std::size_t const capacity = ::ceil(num_keys / load_factor); // Constructs a map with "capacity" slots using -1 and -1 as the empty key/value sentinels. cuco::static_map<Key, Value> map{ capacity, cuco::empty_key{empty_key_sentinel}, cuco::empty_value{empty_value_sentinel}}; // Get a non-owning, mutable view of the map that allows inserts to pass by value into the kernel auto device_insert_view = map.get_device_mutable_view(); // Predicate will only insert even keys auto is_even = [] __device__(auto key) { return (key % 2) == 0; }; // Allocate storage for count of number of inserted keys thrust::device_vector<int> num_inserted(1); auto constexpr block_size = 256; auto const grid_size = (num_keys + block_size - 1) / block_size; hipLaunchKernelGGL(( filtered_insert), dim3(grid_size), dim3(block_size), 0, 0, device_insert_view, insert_keys.begin(), insert_values.begin(), num_keys, is_even, num_inserted.data().get()); std::cout << "Number of keys inserted: " << num_inserted[0] << std::endl; // Get a non-owning view of the map that allows find operations to pass by value into the kernel auto device_find_view = map.get_device_view(); hipLaunchKernelGGL(( increment_values), dim3(grid_size), dim3(block_size), 0, 0, device_find_view, insert_keys.begin(), num_keys); // Retrieve contents of all the non-empty slots in the map thrust::device_vector<Key> contained_keys(num_inserted[0]); thrust::device_vector<Value> contained_values(num_inserted[0]); map.retrieve_all(contained_keys.begin(), contained_values.begin()); auto tuple_iter = thrust::make_zip_iterator(thrust::make_tuple(contained_keys.begin(), contained_values.begin())); // Iterate over all slot contents and verify that `slot.key + 1 == slot.value` is always true. 
auto result = thrust::all_of( thrust::device, tuple_iter, tuple_iter + num_inserted[0], [] __device__(auto const& tuple) { return thrust::get<0>(tuple) + 1 == thrust::get<1>(tuple); }); if (result) { std::cout << "Success! Target values are properly incremented.\n"; } return 0; }
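Both filtered_insert and increment_values above use a grid-stride loop, so any launch configuration covers all num_keys elements: a thread handles tid, tid + gridDim.x*blockDim.x, and so on. A minimal sketch of the idiom; the kernel name and sizes are arbitrary:

#include <cstddef>
#include <cstdio>
#include <cuda_runtime.h>

// Illustrative sketch of the grid-stride loop (not part of the file pair above).
__global__ void scaleAll(float *data, size_t n, float s) {
  for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < n;
       i += size_t(gridDim.x) * blockDim.x) {
    data[i] *= s;   // each thread touches i, i + stride, i + 2*stride, ...
  }
}

int main() {
  const size_t n = 1 << 20;
  float *d = nullptr;
  cudaMalloc((void **)&d, n * sizeof(float));
  cudaMemset(d, 0, n * sizeof(float));
  scaleAll<<<32, 256>>>(d, n, 2.0f);   // deliberately far fewer threads than n
  cudaDeviceSynchronize();
  printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));
  cudaFree(d);
  return 0;
}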
d1a41be86d65c161a289ef21b3d6f097e1e2762e.cu
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuco/static_map.cuh> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/logical.h> #include <thrust/sequence.h> #include <thrust/tuple.h> #include <cmath> #include <cstddef> #include <iostream> #include <limits> /** * @file device_view_example.cu * @brief Demonstrates usage of the device side APIs for individual operations like insert/find. * * Individual operations like a single insert or find can be performed in device code via the * static_map "device_view" types. Note that concurrent insert and find are not supported, and * therefore there are separate view types for insert and find to help prevent undefined behavior. * * @note This example is for demonstration purposes only. It is not intended to show the most * performant way to do the example algorithm. * */ /** * @brief Inserts keys that pass the specified predicated into the map. * * @tparam Map Type of the map returned from static_map::get_device_mutable_view * @tparam KeyIter Input iterator whose value_type convertible to Map::key_type * @tparam ValueIter Input iterator whose value_type is convertible to Map::mapped_type * @tparam Predicate Unary predicate * * @param[in] map_view View of the map into which inserts will be performed * @param[in] key_begin The beginning of the range of keys to insert * @param[in] value_begin The beginning of the range of values associated with each key to insert * @param[in] num_keys The total number of keys and values * @param[in] pred Unary predicate applied to each key. Only keys that pass the predicated will be * inserted. * @param[out] num_inserted The total number of keys successfully inserted */ template <typename Map, typename KeyIter, typename ValueIter, typename Predicate> __global__ void filtered_insert(Map map_view, KeyIter key_begin, ValueIter value_begin, std::size_t num_keys, Predicate pred, int* num_inserted) { auto tid = threadIdx.x + blockIdx.x * blockDim.x; std::size_t counter = 0; while (tid < num_keys) { // Only insert keys that pass the predicate if (pred(key_begin[tid])) { // device_mutable_view::insert returns `true` if it is the first time the given key was // inserted and `false` if the key already existed if (map_view.insert({key_begin[tid], value_begin[tid]})) { ++counter; // Count number of successfully inserted keys } } tid += gridDim.x * blockDim.x; } // Update global count of inserted keys atomicAdd(num_inserted, counter); } /** * @brief For keys that have a match in the map, increments their corresponding value by one. 
* * @tparam Map Type of the map returned from static_map::get_device_view * @tparam KeyIter Input iterator whose value_type convertible to Map::key_type * * @param map_view View of the map into which queries will be performed * @param key_begin The beginning of the range of keys to query * @param num_keys The total number of keys */ template <typename Map, typename KeyIter> __global__ void increment_values(Map map_view, KeyIter key_begin, std::size_t num_keys) { auto tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < num_keys) { // If the key exists in the map, find returns an iterator to the specified key. Otherwise it // returns map.end() auto found = map_view.find(key_begin[tid]); if (found != map_view.end()) { // If the key exists, atomically increment the associated value // The value type of the iterator is pair<cuda::atomic<Key>, cuda::atomic<Value>> found->second.fetch_add(1, cuda::memory_order_relaxed); } tid += gridDim.x * blockDim.x; } } int main(void) { using Key = int; using Value = int; // Empty slots are represented by reserved "sentinel" values. These values should be selected such // that they never occur in your input data. Key constexpr empty_key_sentinel = -1; Value constexpr empty_value_sentinel = -1; // Number of key/value pairs to be inserted std::size_t constexpr num_keys = 50'000; // Create a sequence of keys and values {{0,0}, {1,1}, ... {i,i}} thrust::device_vector<Key> insert_keys(num_keys); thrust::sequence(insert_keys.begin(), insert_keys.end(), 0); thrust::device_vector<Value> insert_values(num_keys); thrust::sequence(insert_values.begin(), insert_values.end(), 0); // Compute capacity based on a 50% load factor auto constexpr load_factor = 0.5; std::size_t const capacity = std::ceil(num_keys / load_factor); // Constructs a map with "capacity" slots using -1 and -1 as the empty key/value sentinels. cuco::static_map<Key, Value> map{ capacity, cuco::empty_key{empty_key_sentinel}, cuco::empty_value{empty_value_sentinel}}; // Get a non-owning, mutable view of the map that allows inserts to pass by value into the kernel auto device_insert_view = map.get_device_mutable_view(); // Predicate will only insert even keys auto is_even = [] __device__(auto key) { return (key % 2) == 0; }; // Allocate storage for count of number of inserted keys thrust::device_vector<int> num_inserted(1); auto constexpr block_size = 256; auto const grid_size = (num_keys + block_size - 1) / block_size; filtered_insert<<<grid_size, block_size>>>(device_insert_view, insert_keys.begin(), insert_values.begin(), num_keys, is_even, num_inserted.data().get()); std::cout << "Number of keys inserted: " << num_inserted[0] << std::endl; // Get a non-owning view of the map that allows find operations to pass by value into the kernel auto device_find_view = map.get_device_view(); increment_values<<<grid_size, block_size>>>(device_find_view, insert_keys.begin(), num_keys); // Retrieve contents of all the non-empty slots in the map thrust::device_vector<Key> contained_keys(num_inserted[0]); thrust::device_vector<Value> contained_values(num_inserted[0]); map.retrieve_all(contained_keys.begin(), contained_values.begin()); auto tuple_iter = thrust::make_zip_iterator(thrust::make_tuple(contained_keys.begin(), contained_values.begin())); // Iterate over all slot contents and verify that `slot.key + 1 == slot.value` is always true. 
auto result = thrust::all_of( thrust::device, tuple_iter, tuple_iter + num_inserted[0], [] __device__(auto const& tuple) { return thrust::get<0>(tuple) + 1 == thrust::get<1>(tuple); }); if (result) { std::cout << "Success! Target values are properly incremented.\n"; } return 0; }
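filtered_insert above keeps a private per-thread tally of successful inserts and folds it into the global total with a single atomicAdd per thread, instead of one atomic per insert. A self-contained sketch of that counting pattern, here counting even values in an invented array:

#include <cstdio>
#include <cuda_runtime.h>

// Illustrative sketch: per-thread tally, one atomicAdd per thread at the end.
__global__ void countEven(const int *data, int n, int *count) {
  int local = 0;                                    // private per-thread counter
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n;
       i += gridDim.x * blockDim.x) {
    if ((data[i] % 2) == 0) ++local;
  }
  atomicAdd(count, local);                          // single global update per thread
}

int main() {
  const int n = 1000;
  int h_data[n];
  for (int i = 0; i < n; i++) h_data[i] = i;
  int *d_data = nullptr, *d_count = nullptr;
  cudaMalloc((void **)&d_data, n * sizeof(int));
  cudaMalloc((void **)&d_count, sizeof(int));
  cudaMemcpy(d_data, h_data, n * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemset(d_count, 0, sizeof(int));
  countEven<<<4, 128>>>(d_data, n, d_count);
  int h_count = 0;
  cudaMemcpy(&h_count, d_count, sizeof(int), cudaMemcpyDeviceToHost);
  printf("even values: %d (expected 500)\n", h_count);
  cudaFree(d_data);
  cudaFree(d_count);
  return 0;
}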
64f892c79a35d80139aae75be4cc9e550e0a7c28.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { } __global__ void relu(const int n, const double *a, double *b) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i<n) { if (a[i]>0.0) {b[i] = a[i];} else {b[i] = 0.0;} } }
64f892c79a35d80139aae75be4cc9e550e0a7c28.cu
#include "includes.h" extern "C" { } __global__ void relu(const int n, const double *a, double *b) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i<n) { if (a[i]>0.0) {b[i] = a[i];} else {b[i] = 0.0;} } }
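The relu file above ships without a host driver. A minimal, hypothetical harness for exercising it; the buffer contents, block size of 256, and 1-D launch shape are choices made for this sketch, not part of the original file:

#include <cstdio>
#include <cuda_runtime.h>

// Kernel reproduced from the file above so the sketch is self-contained.
__global__ void relu(const int n, const double *a, double *b) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) {
    if (a[i] > 0.0) { b[i] = a[i]; } else { b[i] = 0.0; }
  }
}

int main() {
  const int n = 8;
  double h_a[n] = {-3.0, -1.5, 0.0, 0.5, 1.0, -0.1, 2.5, -7.0};
  double h_b[n];
  double *d_a = nullptr, *d_b = nullptr;
  cudaMalloc((void **)&d_a, n * sizeof(double));
  cudaMalloc((void **)&d_b, n * sizeof(double));
  cudaMemcpy(d_a, h_a, n * sizeof(double), cudaMemcpyHostToDevice);

  const int block = 256;
  const int grid = (n + block - 1) / block;      // enough threads to cover n
  relu<<<grid, block>>>(n, d_a, d_b);

  cudaMemcpy(h_b, d_b, n * sizeof(double), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; i++) printf("%g -> %g\n", h_a[i], h_b[i]);
  cudaFree(d_a);
  cudaFree(d_b);
  return 0;
}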
c02ec9e51395f070c4a14db6fee6a50d34371e5f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void VectorInputDiffKernel( float *input, int inputSize, float *referenceVector, int maxCells, float *difference ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < maxCells * inputSize) { difference[threadId] = input[threadId % inputSize] - referenceVector[threadId]; } }
c02ec9e51395f070c4a14db6fee6a50d34371e5f.cu
#include "includes.h" __global__ void VectorInputDiffKernel( float *input, int inputSize, float *referenceVector, int maxCells, float *difference ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < maxCells * inputSize) { difference[threadId] = input[threadId % inputSize] - referenceVector[threadId]; } }
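VectorInputDiffKernel above derives its flat threadId from a 2-D grid of 1-D blocks, numbering the blocks row-major before adding threadIdx.x; this layout is commonly used when the required block count would not fit into a single grid dimension. A standalone sketch of the same index computation together with a matching grid size; N and the block width are arbitrary here:

#include <cstdio>
#include <cuda_runtime.h>

// Illustrative sketch: same row-major flattening of a 2-D grid as the kernel above.
__global__ void writeFlatId(int *out, int n) {
  int threadId = blockDim.x * blockIdx.y * gridDim.x   // rows of blocks above this one
               + blockDim.x * blockIdx.x               // blocks to the left in this row
               + threadIdx.x;                          // lane within the block
  if (threadId < n) out[threadId] = threadId;
}

int main() {
  const int n = 100000, block = 256;            // arbitrary sizes
  const int blocksNeeded = (n + block - 1) / block;
  const int gridX = 64;                         // arbitrary grid width
  const int gridY = (blocksNeeded + gridX - 1) / gridX;
  dim3 grid(gridX, gridY, 1);                   // gridX * gridY * block >= n

  int *d_out = nullptr;
  cudaMalloc((void **)&d_out, n * sizeof(int));
  writeFlatId<<<grid, block>>>(d_out, n);

  int last = -1;
  cudaMemcpy(&last, d_out + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
  printf("out[n-1] = %d (expected %d)\n", last, n - 1);
  cudaFree(d_out);
  return 0;
}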
e85928703c988fdc9fda1710a408400631f681de.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <stdio.h> #include <MatKernel.hpp> #include <MurmurHash.hpp> static const int dtsignbit = 0x80000000; static const int dtmag = 0x7fffffff; __forceinline__ __device__ int getFloatBits(float val, int fshift) { int ival = *((int *)&val); if (ival & dtsignbit) { ival = -(ival & dtmag); } ival += dtsignbit; ival = ((unsigned int)ival) >> fshift; return ival; } __forceinline__ __device__ int getFloatBits(int ival, int fshift) { return ival; } #define DBSIZE (8*1024) // threadIdx.x is the feature index // threadIdx.y is the tree index // blockIdx.x and blockIdx.y index blocks of columns template <typename S, typename T> __global__ void __treePack(S *fdata, int *treenodes, T *icats, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps, int seed) { __shared__ S fbuff[DBSIZE]; __shared__ int fl[32]; int i, j, ic; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { fl[tid] = fieldlens[tid]; } __syncthreads(); int vshift = fl[5]; int ishift = fl[4] + vshift; int jshift = fl[3] + ishift; int nshift = fl[2] + jshift; int tshift = fl[1] + nshift; int cmask = (1 << fl[5]) - 1; int vmask = (1 << fl[4]) - 1; int imask = (1 << fl[3]) - 1; int jmask = (1 << fl[2]) - 1; int nmask = (1 << fl[1]) - 1; int tmask = (1 << fl[0]) - 1; int nc = (DBSIZE / nrows); int itree = threadIdx.y; int jfeat = threadIdx.x; int fshift = 32 - fl[4]; for (i = nc * blockIdx.x; i < ncols; i += nc * gridDim.x) { int ctodo = min(nc, ncols - i); for (j = tid; j < nrows * ctodo; j += blockDim.x*blockDim.y) { fbuff[j] = fdata[j + i * nrows]; } __syncthreads(); for (j = i; j < i + ctodo; j++) { // j is the column index ic = (int)icats[j]; for (itree = threadIdx.y; itree < ntrees; itree += blockDim.y) { if (jfeat < nsamps) { int inode0 = treenodes[itree + j * ntrees]; int inode = inode0 & 0x7fffffff; long long isign = ((long long)((inode0 & dtsignbit) ^ dtsignbit)) << 32; int ifeat = mmhash3(itree, inode, jfeat, nrows, seed); S v = fbuff[ifeat + (j - i) * nrows]; int ival = getFloatBits(v, fshift); long long hdr = (((long long)(tmask & itree)) << tshift) | (((long long)(nmask & inode)) << nshift) | (((long long)(jmask & jfeat)) << jshift) | (((long long)(imask & ifeat)) << ishift) | (((long long)(vmask & ival)) << vshift) | ((long long)(ic & cmask)) | isign; out[jfeat + nsamps * (itree + ntrees * j)] = hdr; } } } __syncthreads(); } } int treePack(float *fdata, int *treenodes, int *icats, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps, int seed) { int ntx = 32 * (1 + (nsamps - 1)/32); int nty = min(1024 / ntx, ntrees); dim3 bdim(ntx, nty, 1); int nb = min(32, 1 + (ncols-1)/32); hipLaunchKernelGGL(( __treePack<float,int>), dim3(nb),dim3(bdim), 0, 0, fdata, treenodes, icats, out, fieldlens, nrows, ncols, ntrees, nsamps, seed); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int treePackfc(float *fdata, int *treenodes, float *icats, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps, int seed) { int ntx = 32 * (1 + (nsamps - 1)/32); int nty = min(1024 / ntx, ntrees); dim3 bdim(ntx, nty, 1); int nb = min(32, 1 + (ncols-1)/32); hipLaunchKernelGGL(( __treePack<float,float>), dim3(nb),dim3(bdim), 0, 0, fdata, treenodes, icats, out, fieldlens, nrows, ncols, ntrees, nsamps, seed); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int treePackInt(int *fdata, int *treenodes, int 
*icats, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps, int seed) { int ntx = 32 * (1 + (nsamps - 1)/32); int nty = min(1024 / ntx, ntrees); dim3 bdim(ntx, nty, 1); int nb = min(32, 1 + (ncols-1)/32); hipLaunchKernelGGL(( __treePack<int,int>), dim3(nb),dim3(bdim), 0, 0, fdata, treenodes, icats, out, fieldlens, nrows, ncols, ntrees, nsamps, seed); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } // threadIdx.x is the tree index // threadIdx.y is a column index // blockIdx.x and blockIdx.y index blocks of columns __global__ void __treeWalk(float *fdata, int *inodes, float *fnodes, int *itrees, int *ftrees, int *vtrees, float *ctrees, int nrows, int ncols, int ntrees, int nnodes, int getcat, int nbits, int nlevels) { __shared__ float fbuff[DBSIZE]; int i, j, k, itree, inode, ipos, ftree, vtree, ifeat, ichild, big; float ctree, feat; int nc = (DBSIZE / nrows); int fshift = 32 - nbits; int tid = threadIdx.x + blockDim.x * threadIdx.y; int bid = blockIdx.x + gridDim.x * blockIdx.y; int nblocks = gridDim.x * gridDim.y; int nthreads = blockDim.x * blockDim.y; for (i = nc * bid; i < ncols; i += nc * nblocks) { // i is a global block column index int ctodo = min(nc, ncols - i); // Fill up the SHMEM buffer with nc columns from fdata __syncthreads(); for (j = tid; j < nrows * ctodo; j += nthreads) { fbuff[j] = fdata[j + i * nrows]; } __syncthreads(); for (j = threadIdx.y; j < ctodo; j += blockDim.y) { // j is the (local SHMEM) column index for (itree = threadIdx.x; itree < ntrees; itree += blockDim.x) { // itree indexes the trees inode = 0; // points to the current node ipos = itree * nnodes; // address in the tree arrays of this node for (k = 0; k < nlevels; k++) { ichild = itrees[ipos]; // left child index vtree = vtrees[ipos]; // and threshold if (vtree == -2) { // non-splittable node, so mark inode inode = inode | dtsignbit; } if (ichild == 0 || vtree == -2) break; // this is a leaf, so break ftree = ftrees[ipos]; // otherwise get split feature index feat = fbuff[ftree + j * nrows]; // get the feature pointed to ifeat = getFloatBits(feat, fshift); big = ifeat > vtree; // compare with the threshold inode = ichild + big; // address of left child in the block ipos = inode + itree * nnodes; // address in the tree arrays of this node } if (getcat) { // save the leaf node index or the label ctree = ctrees[ipos]; fnodes[itree + (i + j) * ntrees] = ctree; } else { inodes[itree + (i + j) * ntrees] = inode; } } } __syncthreads(); } } int treeWalk(float *fdata, int *inodes, float *fnodes, int *itrees, int *ftrees, int *vtrees, float *ctrees, int nrows, int ncols, int ntrees, int nnodes, int getcat, int nbits, int nlevels) { int nc = DBSIZE / nrows; int xthreads = min(ntrees,1024); int ythreads = min(nc,1024/xthreads); dim3 threaddims(xthreads, ythreads, 1); int nblocks = 1 + (ncols-1) / 8 / nc; int yblocks = 1 + (nblocks-1)/65536; int xblocks = 1 + (nblocks-1)/yblocks; dim3 blockdims(xblocks, yblocks, 1); // printf("nrows %d, ncols %d, ntrees %d, nnodes %d, getcat %d, nbits %d, nlevels %d, xthreads %d, ythreads %d, xblocks %d, yblocks %d\n", // nrows, ncols, ntrees, nnodes, getcat, nbits, nlevels, xthreads, ythreads, xblocks, yblocks); hipLaunchKernelGGL(( __treeWalk), dim3(blockdims),dim3(threaddims), 0, 0, fdata, inodes, fnodes, itrees, ftrees, vtrees, ctrees, nrows, ncols, ntrees, nnodes, getcat, nbits, nlevels); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } class entImpty { public: static __device__ inline float 
fupdate(int v) { return (float)v * logf((float)max(1, v)); } static __device__ inline float fresult(float vacc, int vsum) { float vs = (float)max(1, vsum); return logf(vs) - vacc / vs; } }; class giniImpty { public: static __device__ inline float fupdate(int v) { return (float)v * (float)v; } static __device__ inline float fresult(float vacc, int vsum) { float vs = (float)max(1, vsum); return 1.0f - vacc / (vs*vs); } }; #if __CUDA_ARCH__ >= 300 __device__ inline void accumup2(int &cnt, float &update) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_up(update, h); int tmp = __shfl_up(cnt, h); if (threadIdx.x >=h) { update += tmpx; cnt += tmp; } } } __device__ inline void accumup3(int &cnt, float &update, float &updatet) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_up(update, h); float tmpy = __shfl_up(updatet, h); int tmp = __shfl_up(cnt, h); if (threadIdx.x >=h) { update += tmpx; updatet += tmpy; cnt += tmp; } } } __device__ inline void accumdown3(int &cnt, float &update, float &updatet, int bound) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_down(update, h); float tmpy = __shfl_down(updatet, h); int tmp = __shfl_down(cnt, h); if (threadIdx.x + h <= bound) { update += tmpx; updatet += tmpy; cnt += tmp; } } } __device__ inline void minup2(float &impty, int &ival) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_up(impty, h); int tmp = __shfl_up(ival, h); if (threadIdx.x >= h && tmpx < impty) { impty = tmpx; ival = tmp; } } } __device__ inline void maxup2(int &v, int &indx) { #pragma unroll for (int h = 1; h < 32; h = h + h) { int tmpv = __shfl_up(v, h); int tmpi = __shfl_up(indx, h); if (threadIdx.x >= h && tmpv > v) { v = tmpv; indx = tmpi; } } } template<typename T> __global__ void __minImpuritya(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) { __shared__ int catcnt[DBSIZE/2]; __shared__ int cattot[DBSIZE/2]; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { catcnt[tid] = fieldlens[tid]; } __syncthreads(); int vshift = catcnt[5]; int ishift = catcnt[4] + vshift; int cmask = (1 << catcnt[5]) - 1; int vmask = (1 << catcnt[4]) - 1; int imask = (1 << catcnt[3]) - 1; __syncthreads(); int i, j, k, jc0, jc1, jlast; long long key; int cold, ctot, ctt, ctotall, cnew, cnt, ival, icat, lastival, bestival, tmp, maxcnt, imaxcnt; float update, updatet, cacc, cact, caccall, impty, minimpty, lastimpty, tmpx; for (i = threadIdx.y + blockDim.y * blockIdx.x; i < nnodes*nsamps; i += blockDim.y * gridDim.x) { // Process a group with fixed itree, inode, and ifeat jc0 = jc[i]; // The range of indices for this group jc1 = jc[i+1]; __syncthreads(); // Clear the cat counts for this group for (j = tid; j < DBSIZE/2; j += blockDim.x * blockDim.y) { catcnt[j] = 0; cattot[j] = 0; } __syncthreads(); // First pass gets counts for each category and the (ci)log(ci) sum for this block ctot = 0; cacc = 0.0f; maxcnt = -1; imaxcnt = -1; for (j = jc0; j < jc1; j += blockDim.x) { if (j + threadIdx.x < jc1) { // Read a block of (32) keys and counts key = keys[j + threadIdx.x]; // Each (x) thread handles a different input cnt = counts[j + threadIdx.x]; icat = ((int)key) & cmask; // Extract the cat id and integer value } jlast = min(31, jc1 - j - 1); for (k = 0; k <= jlast; k++) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = cattot[icat + ncats 
* threadIdx.y]; // i.e. data for item k is in thread k cnew = cold + cnt; cattot[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); accumup2(cnt,update); ctot += cnt; // Now update the total c and total ci log ci sums cacc += update; ctot = __shfl(ctot, jlast); cacc = __shfl(cacc, jlast); if (cnew > maxcnt) { // Compute and distribute the max cnt maxcnt = cnew; imaxcnt = icat; } maxup2(maxcnt, imaxcnt); maxcnt = __shfl(maxcnt, jlast); imaxcnt = __shfl(imaxcnt, jlast); } __syncthreads(); // if (threadIdx.x == 0 && i < 32) printf("cuda %d %d %f\n", i, ctot, cacc); // Second pass to compute impurity at every input point caccall = cacc; // Save the total count and (ci)log(ci) sum cact = cacc; ctotall = ctot; ctot = 0; cacc = 0.0f; lastival = -1; lastimpty = 1e7f; minimpty = 1e7f; for (j = jc0; j < jc1; j += blockDim.x) { if (j + threadIdx.x < jc1) { // Read a block of (32) keys and counts key = keys[j + threadIdx.x]; // Each (x) thread handles a different input cnt = counts[j + threadIdx.x]; icat = ((int)key) & cmask; // Extract the cat id and integer value ival = ((int)(key >> vshift)) & vmask; } jlast = min(31, jc1 - j - 1); for (k = 0; k <= jlast; k++) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = catcnt[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k ctt = cattot[icat + ncats * threadIdx.y]; cnew = cold + cnt; catcnt[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); // Compute the impurity updates for this input updatet = T::fupdate(ctt-cnew) - T::fupdate(ctt-cold); accumup3(cnt, update, updatet); ctot += cnt; // Now update the total c and total ci log ci sums cacc += update; cact += updatet; impty = T::fresult(cacc, ctot) + T::fresult(cact, ctotall-ctot); // And the impurity for this input // if (i == 0) printf("cuda pos %d impty %f icat %d cnts %d %d cacc %f %d\n", j + threadIdx.x, impty, icat, cold, cnew, cacc, ctot); tmp = __shfl_up(ival, 1); // Need the last impurity and ival in order tmpx = __shfl_up(impty, 1); // to restrict the partition feature to a value boundary if (threadIdx.x > 0) { lastival = tmp; lastimpty = tmpx; } if (ival == lastival) lastimpty = 1e7f; // Eliminate values which are not at value boundaries if (lastimpty < minimpty) { minimpty = lastimpty; bestival = ival; } minup2(minimpty,bestival); minimpty = __shfl(minimpty, jlast); // Carefully copy the last active thread to all threads, needed outside this loop bestival = __shfl(bestival, jlast); ctot = __shfl(ctot, jlast); cacc = __shfl(cacc, jlast); cact = __shfl(cact, jlast); lastival = __shfl(ival, jlast); lastimpty = __shfl(impty, jlast); } if (threadIdx.x == 0) { outv[i] = bestival; // Output the best split feature value outf[i] = ((int)(key >> ishift)) & imask; // Save the feature index outg[i] = T::fresult(caccall, ctotall) - minimpty; // And the impurity gain outc[i] = imaxcnt; } } } template<typename T> __global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) { __shared__ int catcnt[DBSIZE]; __shared__ int cattot[DBSIZE/4]; __shared__ int stott[32]; __shared__ float sacct[32]; __shared__ int slastival[64]; __shared__ int sbestival[32]; __shared__ float sminimpty[32]; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { catcnt[tid] = fieldlens[tid]; } __syncthreads(); int vshift = catcnt[5]; int ishift = catcnt[4] + 
vshift; int cmask = (1 << catcnt[5]) - 1; int vmask = (1 << catcnt[4]) - 1; int imask = (1 << catcnt[3]) - 1; __syncthreads(); int i, j, k, h, jc0, jc1, ilast, jlast; long long key; int cold, tot, ctt, tott, cnew, cnt, ncnt, tcnt, ival, icat, lastival, bestival, tmp; float update, updatet, acc, acct, impty, minimpty; for (i = blockIdx.x; i < nnodes*nsamps; i += gridDim.x) { // Process a group with fixed itree, inode, and ifeat jc0 = jc[i]; // The range of indices for this group jc1 = jc[i+1]; __syncthreads(); // Clear the cat counts and totals for (j = threadIdx.x; j < ncats; j += blockDim.x) { catcnt[j + threadIdx.y * blockDim.x] = 0; if (threadIdx.y == 0) cattot[j] = 0; } if (threadIdx.y == 0) { sminimpty[threadIdx.x] = 1e7f; sbestival[threadIdx.x] = -1; } __syncthreads(); // First pass gets counts for each category and the (ci)log(ci) sum for this entire ifeat group for (j = jc0; j < jc1; j += blockDim.x * blockDim.x) { if (j + tid < jc1) { // Read a block of keys and counts key = keys[j + tid]; cnt = counts[j + tid]; icat = ((int)key) & cmask; // Extract the cat id atomicAdd(&cattot[icat + threadIdx.y * ncats], cnt); // Update count totals } } __syncthreads(); tott = 0; // Compute total count and (c)log(c) for the entire ifeat group acct = 0; if (threadIdx.y == 0) { for (k = 0; k < ncats; k += blockDim.x) { if (k + threadIdx.x < ncats) { tcnt = cattot[k + threadIdx.x]; update = T::fupdate(tcnt); } else { tcnt = 0; update = 0; } accumup2(tcnt,update); ilast = min(31, ncats - k - 1); tcnt = __shfl(tcnt, ilast); update = __shfl(update, ilast); tott += tcnt; acct += update; } stott[threadIdx.x] = tott; sacct[threadIdx.x] = acct; } tott = stott[threadIdx.x]; // if (tid == 0 && i < 32) printf("cuda %d %d %f\n", i, tott, acct); // Main loop, work on blocks of 1024 (ideally) for (j = jc0; j < jc1; j += blockDim.x * blockDim.x) { for (k = 0; k < ncats; k += blockDim.x) { // copy cumcounts from last row of last iteration to the first row tmp = catcnt[k + threadIdx.x + (blockDim.y -1) * ncats]; __syncthreads(); if (threadIdx.y == 0) { catcnt[k + threadIdx.x] = tmp; } else { catcnt[k + threadIdx.x + threadIdx.y * ncats] = 0; } __syncthreads(); } if (j + tid < jc1) { // Read a block of keys and counts key = keys[j + tid]; cnt = counts[j + tid]; icat = ((int)key) & cmask; // Extract the cat id and integer value; ival = ((int)(key >> vshift)) & vmask; atomicAdd(&catcnt[icat + threadIdx.y * ncats], cnt); // Update count totals } jlast = min(31, jc1 - j - threadIdx.y * 32 - 1); // Save the last value in this group if (threadIdx.x == jlast) { slastival[threadIdx.y + 1] = ival; } __syncthreads(); for (k = 0; k < ncats; k += blockDim.x) { // Form the cumsum along columns of catcnts for (h = 1; h < blockDim.y; h = h + h) { if (k + threadIdx.x < ncats && blockIdx.y + h < blockDim.y) { tmp = catcnt[k + threadIdx.x + ncats * threadIdx.y]; } __syncthreads(); if (k + threadIdx.x < ncats && blockIdx.y + h < blockDim.y) { catcnt[k + threadIdx.x + ncats * (threadIdx.y + h)] += tmp; } __syncthreads(); } } tot = 0; // Local to a yblock (row) of catcnts acc = 0.0f; acct = 0.0f; for (k = 0; k < ncats; k += blockDim.x) { // Now sum within a row (yblock) if (k + threadIdx.x < ncats) { cnt = catcnt[k + threadIdx.x + threadIdx.y * ncats]; update = T::fupdate(cnt); updatet = T::fupdate(cattot[k + threadIdx.x] - cnt); } else { cnt = 0; update = 0; updatet = 0; } accumup3(cnt,update,updatet); ilast = min(31, ncats - k - 1); update = __shfl(update, ilast); updatet = __shfl(updatet, ilast); cnt = __shfl(cnt, ilast); tot += 
cnt; acc += update; acct += updatet; } __syncthreads(); // OK, we have everything needed now to compute impurity for the rows in this yblock: // tot, acc, acct at the end of the block lastival = -1; minimpty = 1e7f; ncnt = -cnt; for (k = jlast; k >= 0; k--) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = catcnt[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k ctt = cattot[icat + ncats * threadIdx.y]; cnew = cold + ncnt; catcnt[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); updatet = T::fupdate(ctt - cnew) - T::fupdate(ctt - cold); accumdown3(ncnt,update,updatet,jlast); tot += cnt; // Now update the total c and total ci log ci sums acc += update; acct += updatet; impty = T::fresult(acc, tot) + T::fresult(acct, tott - tot); // And the impurity for this input tmp = __shfl_up(ival, 1); if (threadIdx.x > 0) { // Get the last ival to check for a boundary lastival = tmp; } else { lastival = slastival[threadIdx.y]; } __syncthreads(); if (tid == 0) { tmp = slastival[33]; slastival[0] = tmp; } __syncthreads(); if (ival == lastival) impty = 1e7f; // Eliminate values which are not at value boundaries if (impty < minimpty) { minimpty = impty; bestival = ival; } minup2(minimpty,bestival); minimpty = __shfl(minimpty, jlast); bestival = __shfl(bestival, jlast); if (threadIdx.x == 0) { sminimpty[threadIdx.y] = minimpty; sbestival[threadIdx.y] = bestival; } __syncthreads(); if (threadIdx.y == 0) { minimpty = sminimpty[threadIdx.x]; bestival = sbestival[threadIdx.x]; minup2(minimpty,bestival); minimpty = __shfl(minimpty, blockDim.y - 1); bestival = __shfl(bestival, blockDim.y - 1); sminimpty[threadIdx.x] = minimpty; sbestival[threadIdx.x] = bestival; } __syncthreads(); } if (tid == 0) { outv[i] = bestival; // Output the best split feature value outf[i] = (int)((key >> ishift) & imask); // Save the feature index // outg[i] = T::fresult(sacct[0], tott) - minimpty; // And the impurity gain outg[i] = T::fresult(sacct[0], tott); // And the impurity gain } __syncthreads(); } } #else template<class T> __global__ void __minImpuritya(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) {} template<class T> __global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) {} #endif int minImpurity(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps, int impType) { // Note: its safe to round ncats up to a multiple of 32, since its only used to split shmem int ny = min(32, DBSIZE/ncats/2); dim3 tdim(32, ny, 1); int ng = min(64, nnodes*nsamps); if ((impType & 2) == 0) { if ((impType & 1) == 0) { hipLaunchKernelGGL(( __minImpuritya<entImpty>), dim3(ng),dim3(tdim), 0, 0, keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } else { hipLaunchKernelGGL(( __minImpuritya<giniImpty>), dim3(ng),dim3(tdim), 0, 0, keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } } else { if ((impType & 1) == 0) { hipLaunchKernelGGL(( __minImpurityb<entImpty>), dim3(ng),dim3(tdim), 0, 0, keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } else { hipLaunchKernelGGL(( __minImpurityb<giniImpty>), dim3(ng),dim3(tdim), 0, 0, keys, counts, outv, outf, outg, outc, jc, fieldlens, 
nnodes, ncats, nsamps); } } fflush(stdout); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __findBoundaries(long long *keys, int *jc, int n, int njc, int shift) { __shared__ int dbuff[1024]; int i, j, iv, lasti; int imin = ((int)(32 * ((((long long)n) * blockIdx.x) / (gridDim.x * 32)))); int imax = min(n, ((int)(32 * ((((long long)n) * (blockIdx.x + 1)) / (gridDim.x * 32) + 1)))); int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid == 0 && blockIdx.x == 0) { jc[0] = 0; } __syncthreads(); lasti = 0x7fffffff; for (i = imin; i <= imax; i += blockDim.x * blockDim.y) { iv = njc; if (i + tid < imax) { iv = (int)(keys[i + tid] >> shift); dbuff[tid] = iv; } __syncthreads(); if (i + tid < imax || i + tid == n) { if (tid > 0) lasti = dbuff[tid - 1]; if (iv > lasti) { for (j = lasti+1; j <= iv; j++) { jc[j] = i + tid; } } if (tid == 0) { lasti = dbuff[blockDim.x * blockDim.y - 1]; } } __syncthreads(); } } int findBoundaries(long long *keys, int *jc, int n, int njc, int shift) { int ny = min(32, 1 + (n-1)/32); dim3 tdim(32, ny, 1); int ng = min(64, 1+n/32/ny); hipLaunchKernelGGL(( __findBoundaries), dim3(ng),dim3(tdim), 0, 0, keys, jc, n, njc, shift); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __floatToInt(int n, float *in, int *out, int nbits) { int fshift = 32 - nbits; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < n; i += blockDim.x * gridDim.x * gridDim.y) { float v = in[i]; int ival = getFloatBits(v, fshift); out[i] = ival; } } int floatToInt(int n, float *in, int *out, int nbits) { int nthreads; dim3 griddims; setsizes(n, &griddims, &nthreads); hipLaunchKernelGGL(( __floatToInt), dim3(griddims),dim3(nthreads), 0, 0, n, in, out, nbits); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __jfeatsToIfeats(int itree, int *inodes, int *jfeats, int *ifeats, int n, int nfeats, int seed) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < n; i += blockDim.x * gridDim.x * gridDim.y) { int inode = inodes[i]; int jfeat = jfeats[i]; int ifeat = mmhash3(itree, inode, jfeat, nfeats, seed); ifeats[i] = ifeat; } } int jfeatsToIfeats(int itree, int *inodes, int *jfeats, int *ifeats, int n, int nfeats, int seed) { int nthreads; dim3 griddims; setsizes(n, &griddims, &nthreads); hipLaunchKernelGGL(( __jfeatsToIfeats), dim3(griddims),dim3(nthreads), 0, 0, itree, inodes, jfeats, ifeats, n, nfeats, seed); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; }
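In the kernels above, entImpty::fupdate / giniImpty::fupdate fold one per-class count c into a running accumulator, and fresult turns (accumulator, total count) into the impurity value. A minimal host-side reference of the same arithmetic can help when reading the warp-scan code; this is an illustrative sketch only, not part of the library, and the function names are invented here.

// Host reference: entropy = log(n) - (sum c*log c)/n, gini = 1 - (sum c^2)/n^2,
// matching the entImpty/giniImpty functors above for a vector of per-class counts.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

static float entropyImpurity(const std::vector<int> &counts) {
  float acc = 0.0f; int n = 0;
  for (int c : counts) { acc += (float)c * logf((float)std::max(1, c)); n += c; }  // fupdate
  float ns = (float)std::max(1, n);
  return logf(ns) - acc / ns;                                                      // fresult
}

static float giniImpurity(const std::vector<int> &counts) {
  float acc = 0.0f; int n = 0;
  for (int c : counts) { acc += (float)c * (float)c; n += c; }                     // fupdate
  float ns = (float)std::max(1, n);
  return 1.0f - acc / (ns * ns);                                                   // fresult
}

int main() {
  std::vector<int> counts = {5, 3, 2};  // hypothetical class histogram for one tree node
  std::printf("entropy %f  gini %f\n", entropyImpurity(counts), giniImpurity(counts));
  return 0;
}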
e85928703c988fdc9fda1710a408400631f681de.cu
#include <cuda_runtime.h> #include <curand_kernel.h> #include <stdio.h> #include <MatKernel.hpp> #include <MurmurHash.hpp> static const int dtsignbit = 0x80000000; static const int dtmag = 0x7fffffff; __forceinline__ __device__ int getFloatBits(float val, int fshift) { int ival = *((int *)&val); if (ival & dtsignbit) { ival = -(ival & dtmag); } ival += dtsignbit; ival = ((unsigned int)ival) >> fshift; return ival; } __forceinline__ __device__ int getFloatBits(int ival, int fshift) { return ival; } #define DBSIZE (8*1024) // threadIdx.x is the feature index // threadIdx.y is the tree index // blockIdx.x and blockIdx.y index blocks of columns template <typename S, typename T> __global__ void __treePack(S *fdata, int *treenodes, T *icats, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps, int seed) { __shared__ S fbuff[DBSIZE]; __shared__ int fl[32]; int i, j, ic; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { fl[tid] = fieldlens[tid]; } __syncthreads(); int vshift = fl[5]; int ishift = fl[4] + vshift; int jshift = fl[3] + ishift; int nshift = fl[2] + jshift; int tshift = fl[1] + nshift; int cmask = (1 << fl[5]) - 1; int vmask = (1 << fl[4]) - 1; int imask = (1 << fl[3]) - 1; int jmask = (1 << fl[2]) - 1; int nmask = (1 << fl[1]) - 1; int tmask = (1 << fl[0]) - 1; int nc = (DBSIZE / nrows); int itree = threadIdx.y; int jfeat = threadIdx.x; int fshift = 32 - fl[4]; for (i = nc * blockIdx.x; i < ncols; i += nc * gridDim.x) { int ctodo = min(nc, ncols - i); for (j = tid; j < nrows * ctodo; j += blockDim.x*blockDim.y) { fbuff[j] = fdata[j + i * nrows]; } __syncthreads(); for (j = i; j < i + ctodo; j++) { // j is the column index ic = (int)icats[j]; for (itree = threadIdx.y; itree < ntrees; itree += blockDim.y) { if (jfeat < nsamps) { int inode0 = treenodes[itree + j * ntrees]; int inode = inode0 & 0x7fffffff; long long isign = ((long long)((inode0 & dtsignbit) ^ dtsignbit)) << 32; int ifeat = mmhash3(itree, inode, jfeat, nrows, seed); S v = fbuff[ifeat + (j - i) * nrows]; int ival = getFloatBits(v, fshift); long long hdr = (((long long)(tmask & itree)) << tshift) | (((long long)(nmask & inode)) << nshift) | (((long long)(jmask & jfeat)) << jshift) | (((long long)(imask & ifeat)) << ishift) | (((long long)(vmask & ival)) << vshift) | ((long long)(ic & cmask)) | isign; out[jfeat + nsamps * (itree + ntrees * j)] = hdr; } } } __syncthreads(); } } int treePack(float *fdata, int *treenodes, int *icats, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps, int seed) { int ntx = 32 * (1 + (nsamps - 1)/32); int nty = min(1024 / ntx, ntrees); dim3 bdim(ntx, nty, 1); int nb = min(32, 1 + (ncols-1)/32); __treePack<float,int><<<nb,bdim>>>(fdata, treenodes, icats, out, fieldlens, nrows, ncols, ntrees, nsamps, seed); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int treePackfc(float *fdata, int *treenodes, float *icats, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps, int seed) { int ntx = 32 * (1 + (nsamps - 1)/32); int nty = min(1024 / ntx, ntrees); dim3 bdim(ntx, nty, 1); int nb = min(32, 1 + (ncols-1)/32); __treePack<float,float><<<nb,bdim>>>(fdata, treenodes, icats, out, fieldlens, nrows, ncols, ntrees, nsamps, seed); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int treePackInt(int *fdata, int *treenodes, int *icats, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps, int seed) { int ntx = 32 * (1 + (nsamps - 1)/32); int 
nty = min(1024 / ntx, ntrees); dim3 bdim(ntx, nty, 1); int nb = min(32, 1 + (ncols-1)/32); __treePack<int,int><<<nb,bdim>>>(fdata, treenodes, icats, out, fieldlens, nrows, ncols, ntrees, nsamps, seed); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } // threadIdx.x is the tree index // threadIdx.y is a column index // blockIdx.x and blockIdx.y index blocks of columns __global__ void __treeWalk(float *fdata, int *inodes, float *fnodes, int *itrees, int *ftrees, int *vtrees, float *ctrees, int nrows, int ncols, int ntrees, int nnodes, int getcat, int nbits, int nlevels) { __shared__ float fbuff[DBSIZE]; int i, j, k, itree, inode, ipos, ftree, vtree, ifeat, ichild, big; float ctree, feat; int nc = (DBSIZE / nrows); int fshift = 32 - nbits; int tid = threadIdx.x + blockDim.x * threadIdx.y; int bid = blockIdx.x + gridDim.x * blockIdx.y; int nblocks = gridDim.x * gridDim.y; int nthreads = blockDim.x * blockDim.y; for (i = nc * bid; i < ncols; i += nc * nblocks) { // i is a global block column index int ctodo = min(nc, ncols - i); // Fill up the SHMEM buffer with nc columns from fdata __syncthreads(); for (j = tid; j < nrows * ctodo; j += nthreads) { fbuff[j] = fdata[j + i * nrows]; } __syncthreads(); for (j = threadIdx.y; j < ctodo; j += blockDim.y) { // j is the (local SHMEM) column index for (itree = threadIdx.x; itree < ntrees; itree += blockDim.x) { // itree indexes the trees inode = 0; // points to the current node ipos = itree * nnodes; // address in the tree arrays of this node for (k = 0; k < nlevels; k++) { ichild = itrees[ipos]; // left child index vtree = vtrees[ipos]; // and threshold if (vtree == -2) { // non-splittable node, so mark inode inode = inode | dtsignbit; } if (ichild == 0 || vtree == -2) break; // this is a leaf, so break ftree = ftrees[ipos]; // otherwise get split feature index feat = fbuff[ftree + j * nrows]; // get the feature pointed to ifeat = getFloatBits(feat, fshift); big = ifeat > vtree; // compare with the threshold inode = ichild + big; // address of left child in the block ipos = inode + itree * nnodes; // address in the tree arrays of this node } if (getcat) { // save the leaf node index or the label ctree = ctrees[ipos]; fnodes[itree + (i + j) * ntrees] = ctree; } else { inodes[itree + (i + j) * ntrees] = inode; } } } __syncthreads(); } } int treeWalk(float *fdata, int *inodes, float *fnodes, int *itrees, int *ftrees, int *vtrees, float *ctrees, int nrows, int ncols, int ntrees, int nnodes, int getcat, int nbits, int nlevels) { int nc = DBSIZE / nrows; int xthreads = min(ntrees,1024); int ythreads = min(nc,1024/xthreads); dim3 threaddims(xthreads, ythreads, 1); int nblocks = 1 + (ncols-1) / 8 / nc; int yblocks = 1 + (nblocks-1)/65536; int xblocks = 1 + (nblocks-1)/yblocks; dim3 blockdims(xblocks, yblocks, 1); // printf("nrows %d, ncols %d, ntrees %d, nnodes %d, getcat %d, nbits %d, nlevels %d, xthreads %d, ythreads %d, xblocks %d, yblocks %d\n", // nrows, ncols, ntrees, nnodes, getcat, nbits, nlevels, xthreads, ythreads, xblocks, yblocks); __treeWalk<<<blockdims,threaddims>>>(fdata, inodes, fnodes, itrees, ftrees, vtrees, ctrees, nrows, ncols, ntrees, nnodes, getcat, nbits, nlevels); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } class entImpty { public: static __device__ inline float fupdate(int v) { return (float)v * logf((float)max(1, v)); } static __device__ inline float fresult(float vacc, int vsum) { float vs = (float)max(1, vsum); return logf(vs) - vacc / vs; } }; class giniImpty { public: 
static __device__ inline float fupdate(int v) { return (float)v * (float)v; } static __device__ inline float fresult(float vacc, int vsum) { float vs = (float)max(1, vsum); return 1.0f - vacc / (vs*vs); } }; #if __CUDA_ARCH__ >= 300 __device__ inline void accumup2(int &cnt, float &update) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_up(update, h); int tmp = __shfl_up(cnt, h); if (threadIdx.x >=h) { update += tmpx; cnt += tmp; } } } __device__ inline void accumup3(int &cnt, float &update, float &updatet) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_up(update, h); float tmpy = __shfl_up(updatet, h); int tmp = __shfl_up(cnt, h); if (threadIdx.x >=h) { update += tmpx; updatet += tmpy; cnt += tmp; } } } __device__ inline void accumdown3(int &cnt, float &update, float &updatet, int bound) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_down(update, h); float tmpy = __shfl_down(updatet, h); int tmp = __shfl_down(cnt, h); if (threadIdx.x + h <= bound) { update += tmpx; updatet += tmpy; cnt += tmp; } } } __device__ inline void minup2(float &impty, int &ival) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_up(impty, h); int tmp = __shfl_up(ival, h); if (threadIdx.x >= h && tmpx < impty) { impty = tmpx; ival = tmp; } } } __device__ inline void maxup2(int &v, int &indx) { #pragma unroll for (int h = 1; h < 32; h = h + h) { int tmpv = __shfl_up(v, h); int tmpi = __shfl_up(indx, h); if (threadIdx.x >= h && tmpv > v) { v = tmpv; indx = tmpi; } } } template<typename T> __global__ void __minImpuritya(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) { __shared__ int catcnt[DBSIZE/2]; __shared__ int cattot[DBSIZE/2]; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { catcnt[tid] = fieldlens[tid]; } __syncthreads(); int vshift = catcnt[5]; int ishift = catcnt[4] + vshift; int cmask = (1 << catcnt[5]) - 1; int vmask = (1 << catcnt[4]) - 1; int imask = (1 << catcnt[3]) - 1; __syncthreads(); int i, j, k, jc0, jc1, jlast; long long key; int cold, ctot, ctt, ctotall, cnew, cnt, ival, icat, lastival, bestival, tmp, maxcnt, imaxcnt; float update, updatet, cacc, cact, caccall, impty, minimpty, lastimpty, tmpx; for (i = threadIdx.y + blockDim.y * blockIdx.x; i < nnodes*nsamps; i += blockDim.y * gridDim.x) { // Process a group with fixed itree, inode, and ifeat jc0 = jc[i]; // The range of indices for this group jc1 = jc[i+1]; __syncthreads(); // Clear the cat counts for this group for (j = tid; j < DBSIZE/2; j += blockDim.x * blockDim.y) { catcnt[j] = 0; cattot[j] = 0; } __syncthreads(); // First pass gets counts for each category and the (ci)log(ci) sum for this block ctot = 0; cacc = 0.0f; maxcnt = -1; imaxcnt = -1; for (j = jc0; j < jc1; j += blockDim.x) { if (j + threadIdx.x < jc1) { // Read a block of (32) keys and counts key = keys[j + threadIdx.x]; // Each (x) thread handles a different input cnt = counts[j + threadIdx.x]; icat = ((int)key) & cmask; // Extract the cat id and integer value } jlast = min(31, jc1 - j - 1); for (k = 0; k <= jlast; k++) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = cattot[icat + ncats * threadIdx.y]; // i.e. 
data for item k is in thread k cnew = cold + cnt; cattot[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); accumup2(cnt,update); ctot += cnt; // Now update the total c and total ci log ci sums cacc += update; ctot = __shfl(ctot, jlast); cacc = __shfl(cacc, jlast); if (cnew > maxcnt) { // Compute and distribute the max cnt maxcnt = cnew; imaxcnt = icat; } maxup2(maxcnt, imaxcnt); maxcnt = __shfl(maxcnt, jlast); imaxcnt = __shfl(imaxcnt, jlast); } __syncthreads(); // if (threadIdx.x == 0 && i < 32) printf("cuda %d %d %f\n", i, ctot, cacc); // Second pass to compute impurity at every input point caccall = cacc; // Save the total count and (ci)log(ci) sum cact = cacc; ctotall = ctot; ctot = 0; cacc = 0.0f; lastival = -1; lastimpty = 1e7f; minimpty = 1e7f; for (j = jc0; j < jc1; j += blockDim.x) { if (j + threadIdx.x < jc1) { // Read a block of (32) keys and counts key = keys[j + threadIdx.x]; // Each (x) thread handles a different input cnt = counts[j + threadIdx.x]; icat = ((int)key) & cmask; // Extract the cat id and integer value ival = ((int)(key >> vshift)) & vmask; } jlast = min(31, jc1 - j - 1); for (k = 0; k <= jlast; k++) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = catcnt[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k ctt = cattot[icat + ncats * threadIdx.y]; cnew = cold + cnt; catcnt[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); // Compute the impurity updates for this input updatet = T::fupdate(ctt-cnew) - T::fupdate(ctt-cold); accumup3(cnt, update, updatet); ctot += cnt; // Now update the total c and total ci log ci sums cacc += update; cact += updatet; impty = T::fresult(cacc, ctot) + T::fresult(cact, ctotall-ctot); // And the impurity for this input // if (i == 0) printf("cuda pos %d impty %f icat %d cnts %d %d cacc %f %d\n", j + threadIdx.x, impty, icat, cold, cnew, cacc, ctot); tmp = __shfl_up(ival, 1); // Need the last impurity and ival in order tmpx = __shfl_up(impty, 1); // to restrict the partition feature to a value boundary if (threadIdx.x > 0) { lastival = tmp; lastimpty = tmpx; } if (ival == lastival) lastimpty = 1e7f; // Eliminate values which are not at value boundaries if (lastimpty < minimpty) { minimpty = lastimpty; bestival = ival; } minup2(minimpty,bestival); minimpty = __shfl(minimpty, jlast); // Carefully copy the last active thread to all threads, needed outside this loop bestival = __shfl(bestival, jlast); ctot = __shfl(ctot, jlast); cacc = __shfl(cacc, jlast); cact = __shfl(cact, jlast); lastival = __shfl(ival, jlast); lastimpty = __shfl(impty, jlast); } if (threadIdx.x == 0) { outv[i] = bestival; // Output the best split feature value outf[i] = ((int)(key >> ishift)) & imask; // Save the feature index outg[i] = T::fresult(caccall, ctotall) - minimpty; // And the impurity gain outc[i] = imaxcnt; } } } template<typename T> __global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) { __shared__ int catcnt[DBSIZE]; __shared__ int cattot[DBSIZE/4]; __shared__ int stott[32]; __shared__ float sacct[32]; __shared__ int slastival[64]; __shared__ int sbestival[32]; __shared__ float sminimpty[32]; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { catcnt[tid] = fieldlens[tid]; } __syncthreads(); int vshift = catcnt[5]; int ishift = catcnt[4] + vshift; int cmask = (1 << 
catcnt[5]) - 1; int vmask = (1 << catcnt[4]) - 1; int imask = (1 << catcnt[3]) - 1; __syncthreads(); int i, j, k, h, jc0, jc1, ilast, jlast; long long key; int cold, tot, ctt, tott, cnew, cnt, ncnt, tcnt, ival, icat, lastival, bestival, tmp; float update, updatet, acc, acct, impty, minimpty; for (i = blockIdx.x; i < nnodes*nsamps; i += gridDim.x) { // Process a group with fixed itree, inode, and ifeat jc0 = jc[i]; // The range of indices for this group jc1 = jc[i+1]; __syncthreads(); // Clear the cat counts and totals for (j = threadIdx.x; j < ncats; j += blockDim.x) { catcnt[j + threadIdx.y * blockDim.x] = 0; if (threadIdx.y == 0) cattot[j] = 0; } if (threadIdx.y == 0) { sminimpty[threadIdx.x] = 1e7f; sbestival[threadIdx.x] = -1; } __syncthreads(); // First pass gets counts for each category and the (ci)log(ci) sum for this entire ifeat group for (j = jc0; j < jc1; j += blockDim.x * blockDim.x) { if (j + tid < jc1) { // Read a block of keys and counts key = keys[j + tid]; cnt = counts[j + tid]; icat = ((int)key) & cmask; // Extract the cat id atomicAdd(&cattot[icat + threadIdx.y * ncats], cnt); // Update count totals } } __syncthreads(); tott = 0; // Compute total count and (c)log(c) for the entire ifeat group acct = 0; if (threadIdx.y == 0) { for (k = 0; k < ncats; k += blockDim.x) { if (k + threadIdx.x < ncats) { tcnt = cattot[k + threadIdx.x]; update = T::fupdate(tcnt); } else { tcnt = 0; update = 0; } accumup2(tcnt,update); ilast = min(31, ncats - k - 1); tcnt = __shfl(tcnt, ilast); update = __shfl(update, ilast); tott += tcnt; acct += update; } stott[threadIdx.x] = tott; sacct[threadIdx.x] = acct; } tott = stott[threadIdx.x]; // if (tid == 0 && i < 32) printf("cuda %d %d %f\n", i, tott, acct); // Main loop, work on blocks of 1024 (ideally) for (j = jc0; j < jc1; j += blockDim.x * blockDim.x) { for (k = 0; k < ncats; k += blockDim.x) { // copy cumcounts from last row of last iteration to the first row tmp = catcnt[k + threadIdx.x + (blockDim.y -1) * ncats]; __syncthreads(); if (threadIdx.y == 0) { catcnt[k + threadIdx.x] = tmp; } else { catcnt[k + threadIdx.x + threadIdx.y * ncats] = 0; } __syncthreads(); } if (j + tid < jc1) { // Read a block of keys and counts key = keys[j + tid]; cnt = counts[j + tid]; icat = ((int)key) & cmask; // Extract the cat id and integer value; ival = ((int)(key >> vshift)) & vmask; atomicAdd(&catcnt[icat + threadIdx.y * ncats], cnt); // Update count totals } jlast = min(31, jc1 - j - threadIdx.y * 32 - 1); // Save the last value in this group if (threadIdx.x == jlast) { slastival[threadIdx.y + 1] = ival; } __syncthreads(); for (k = 0; k < ncats; k += blockDim.x) { // Form the cumsum along columns of catcnts for (h = 1; h < blockDim.y; h = h + h) { if (k + threadIdx.x < ncats && blockIdx.y + h < blockDim.y) { tmp = catcnt[k + threadIdx.x + ncats * threadIdx.y]; } __syncthreads(); if (k + threadIdx.x < ncats && blockIdx.y + h < blockDim.y) { catcnt[k + threadIdx.x + ncats * (threadIdx.y + h)] += tmp; } __syncthreads(); } } tot = 0; // Local to a yblock (row) of catcnts acc = 0.0f; acct = 0.0f; for (k = 0; k < ncats; k += blockDim.x) { // Now sum within a row (yblock) if (k + threadIdx.x < ncats) { cnt = catcnt[k + threadIdx.x + threadIdx.y * ncats]; update = T::fupdate(cnt); updatet = T::fupdate(cattot[k + threadIdx.x] - cnt); } else { cnt = 0; update = 0; updatet = 0; } accumup3(cnt,update,updatet); ilast = min(31, ncats - k - 1); update = __shfl(update, ilast); updatet = __shfl(updatet, ilast); cnt = __shfl(cnt, ilast); tot += cnt; acc += update; acct += 
updatet; } __syncthreads(); // OK, we have everything needed now to compute impurity for the rows in this yblock: // tot, acc, acct at the end of the block lastival = -1; minimpty = 1e7f; ncnt = -cnt; for (k = jlast; k >= 0; k--) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = catcnt[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k ctt = cattot[icat + ncats * threadIdx.y]; cnew = cold + ncnt; catcnt[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); updatet = T::fupdate(ctt - cnew) - T::fupdate(ctt - cold); accumdown3(ncnt,update,updatet,jlast); tot += cnt; // Now update the total c and total ci log ci sums acc += update; acct += updatet; impty = T::fresult(acc, tot) + T::fresult(acct, tott - tot); // And the impurity for this input tmp = __shfl_up(ival, 1); if (threadIdx.x > 0) { // Get the last ival to check for a boundary lastival = tmp; } else { lastival = slastival[threadIdx.y]; } __syncthreads(); if (tid == 0) { tmp = slastival[33]; slastival[0] = tmp; } __syncthreads(); if (ival == lastival) impty = 1e7f; // Eliminate values which are not at value boundaries if (impty < minimpty) { minimpty = impty; bestival = ival; } minup2(minimpty,bestival); minimpty = __shfl(minimpty, jlast); bestival = __shfl(bestival, jlast); if (threadIdx.x == 0) { sminimpty[threadIdx.y] = minimpty; sbestival[threadIdx.y] = bestival; } __syncthreads(); if (threadIdx.y == 0) { minimpty = sminimpty[threadIdx.x]; bestival = sbestival[threadIdx.x]; minup2(minimpty,bestival); minimpty = __shfl(minimpty, blockDim.y - 1); bestival = __shfl(bestival, blockDim.y - 1); sminimpty[threadIdx.x] = minimpty; sbestival[threadIdx.x] = bestival; } __syncthreads(); } if (tid == 0) { outv[i] = bestival; // Output the best split feature value outf[i] = (int)((key >> ishift) & imask); // Save the feature index // outg[i] = T::fresult(sacct[0], tott) - minimpty; // And the impurity gain outg[i] = T::fresult(sacct[0], tott); // And the impurity gain } __syncthreads(); } } #else template<class T> __global__ void __minImpuritya(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) {} template<class T> __global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) {} #endif int minImpurity(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps, int impType) { // Note: its safe to round ncats up to a multiple of 32, since its only used to split shmem int ny = min(32, DBSIZE/ncats/2); dim3 tdim(32, ny, 1); int ng = min(64, nnodes*nsamps); if ((impType & 2) == 0) { if ((impType & 1) == 0) { __minImpuritya<entImpty><<<ng,tdim>>>(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } else { __minImpuritya<giniImpty><<<ng,tdim>>>(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } } else { if ((impType & 1) == 0) { __minImpurityb<entImpty><<<ng,tdim>>>(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } else { __minImpurityb<giniImpty><<<ng,tdim>>>(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } } fflush(stdout); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __findBoundaries(long long *keys, int 
*jc, int n, int njc, int shift) { __shared__ int dbuff[1024]; int i, j, iv, lasti; int imin = ((int)(32 * ((((long long)n) * blockIdx.x) / (gridDim.x * 32)))); int imax = min(n, ((int)(32 * ((((long long)n) * (blockIdx.x + 1)) / (gridDim.x * 32) + 1)))); int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid == 0 && blockIdx.x == 0) { jc[0] = 0; } __syncthreads(); lasti = 0x7fffffff; for (i = imin; i <= imax; i += blockDim.x * blockDim.y) { iv = njc; if (i + tid < imax) { iv = (int)(keys[i + tid] >> shift); dbuff[tid] = iv; } __syncthreads(); if (i + tid < imax || i + tid == n) { if (tid > 0) lasti = dbuff[tid - 1]; if (iv > lasti) { for (j = lasti+1; j <= iv; j++) { jc[j] = i + tid; } } if (tid == 0) { lasti = dbuff[blockDim.x * blockDim.y - 1]; } } __syncthreads(); } } int findBoundaries(long long *keys, int *jc, int n, int njc, int shift) { int ny = min(32, 1 + (n-1)/32); dim3 tdim(32, ny, 1); int ng = min(64, 1+n/32/ny); __findBoundaries<<<ng,tdim>>>(keys, jc, n, njc, shift); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __floatToInt(int n, float *in, int *out, int nbits) { int fshift = 32 - nbits; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < n; i += blockDim.x * gridDim.x * gridDim.y) { float v = in[i]; int ival = getFloatBits(v, fshift); out[i] = ival; } } int floatToInt(int n, float *in, int *out, int nbits) { int nthreads; dim3 griddims; setsizes(n, &griddims, &nthreads); __floatToInt<<<griddims,nthreads>>>(n, in, out, nbits); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __jfeatsToIfeats(int itree, int *inodes, int *jfeats, int *ifeats, int n, int nfeats, int seed) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < n; i += blockDim.x * gridDim.x * gridDim.y) { int inode = inodes[i]; int jfeat = jfeats[i]; int ifeat = mmhash3(itree, inode, jfeat, nfeats, seed); ifeats[i] = ifeat; } } int jfeatsToIfeats(int itree, int *inodes, int *jfeats, int *ifeats, int n, int nfeats, int seed) { int nthreads; dim3 griddims; setsizes(n, &griddims, &nthreads); __jfeatsToIfeats<<<griddims,nthreads>>>(itree, inodes, jfeats, ifeats, n, nfeats, seed); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; }
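The warp-scan helpers above (accumup2/accumup3/accumdown3/minup2/maxup2) use the pre-CUDA-9 __shfl_up/__shfl_down/__shfl intrinsics behind the __CUDA_ARCH__ >= 300 guard. On CUDA 9+ toolkits those forms are deprecated (and removed in recent ones) in favour of the *_sync variants that take an explicit lane mask. A sketch of accumup2 in that style, assuming a fully active warp (mask 0xffffffff), is shown below; it is illustrative only, not a change to the file.

__device__ inline void accumup2_sync(int &cnt, float &update) {
  // Inclusive warp prefix sum of (cnt, update) across the 32 lanes.
#pragma unroll
  for (int h = 1; h < 32; h = h + h) {
    float tmpx = __shfl_up_sync(0xffffffffu, update, h);
    int   tmp  = __shfl_up_sync(0xffffffffu, cnt, h);
    if (threadIdx.x >= h) { update += tmpx; cnt += tmp; }
  }
}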
8eb1607c504b82efc791a9e500463479d5f489ff.hip
// !!! This is a file automatically generated by hipify!!! #include "compute_com_extents.h" #include <mirheo/core/pvs/object_vector.h> #include <mirheo/core/pvs/views/ov.h> #include <mirheo/core/utils/cuda_common.h> #include <mirheo/core/utils/kernel_launch.h> #include <hip/hip_runtime.h> namespace mirheo { namespace compute_com_extents_kernels { __global__ void minMaxCom(OVview ovView) { const int gid = threadIdx.x + blockDim.x * blockIdx.x; const int objId = gid / warpSize; const int laneId = gid % warpSize; if (objId >= ovView.nObjects) return; real3 mymin = make_real3(+1e10_r); real3 mymax = make_real3(-1e10_r); real3 mycom = make_real3(0.0_r); #pragma unroll 3 for (int i = laneId; i < ovView.objSize; i += warpSize) { const int offset = objId * ovView.objSize + i; const real3 coo = make_real3(ovView.readPosition(offset)); mymin = math::min(mymin, coo); mymax = math::max(mymax, coo); mycom += coo; } mycom = warpReduce( mycom, [] (real a, real b) { return a+b; } ); mymin = warpReduce( mymin, [] (real a, real b) { return math::min(a, b); } ); mymax = warpReduce( mymax, [] (real a, real b) { return math::max(a, b); } ); if (laneId == 0) ovView.comAndExtents[objId] = {mycom / ovView.objSize, mymin, mymax}; } } // namespace compute_com_extents_kernels void computeComExtents(ObjectVector *ov, LocalObjectVector *lov, hipStream_t stream) { OVview view(ov, lov); constexpr int warpSize = 32; const int nthreads = 128; const int nblocks = getNblocks(view.nObjects * warpSize, nthreads); SAFE_KERNEL_LAUNCH( compute_com_extents_kernels::minMaxCom, nblocks, nthreads, 0, stream, view ); } } // namespace mirheo
8eb1607c504b82efc791a9e500463479d5f489ff.cu
#include "compute_com_extents.h" #include <mirheo/core/pvs/object_vector.h> #include <mirheo/core/pvs/views/ov.h> #include <mirheo/core/utils/cuda_common.h> #include <mirheo/core/utils/kernel_launch.h> #include <cuda_runtime.h> namespace mirheo { namespace compute_com_extents_kernels { __global__ void minMaxCom(OVview ovView) { const int gid = threadIdx.x + blockDim.x * blockIdx.x; const int objId = gid / warpSize; const int laneId = gid % warpSize; if (objId >= ovView.nObjects) return; real3 mymin = make_real3(+1e10_r); real3 mymax = make_real3(-1e10_r); real3 mycom = make_real3(0.0_r); #pragma unroll 3 for (int i = laneId; i < ovView.objSize; i += warpSize) { const int offset = objId * ovView.objSize + i; const real3 coo = make_real3(ovView.readPosition(offset)); mymin = math::min(mymin, coo); mymax = math::max(mymax, coo); mycom += coo; } mycom = warpReduce( mycom, [] (real a, real b) { return a+b; } ); mymin = warpReduce( mymin, [] (real a, real b) { return math::min(a, b); } ); mymax = warpReduce( mymax, [] (real a, real b) { return math::max(a, b); } ); if (laneId == 0) ovView.comAndExtents[objId] = {mycom / ovView.objSize, mymin, mymax}; } } // namespace compute_com_extents_kernels void computeComExtents(ObjectVector *ov, LocalObjectVector *lov, cudaStream_t stream) { OVview view(ov, lov); constexpr int warpSize = 32; const int nthreads = 128; const int nblocks = getNblocks(view.nObjects * warpSize, nthreads); SAFE_KERNEL_LAUNCH( compute_com_extents_kernels::minMaxCom, nblocks, nthreads, 0, stream, view ); } } // namespace mirheo
65beaeb4ca229e7924e0d0d6683f7be87abd2616.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "First_Initialize_Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int size = XSIZE*YSIZE; unsigned int *randoms = NULL; hipMalloc(&randoms, XSIZE*YSIZE); int *bestSeen = NULL; hipMalloc(&bestSeen, XSIZE*YSIZE); int *origin = NULL; hipMalloc(&origin, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( First_Initialize_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,randoms,bestSeen,origin); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( First_Initialize_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,randoms,bestSeen,origin); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( First_Initialize_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,randoms,bestSeen,origin); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
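The buffers above are allocated as hipMalloc(&ptr, XSIZE*YSIZE), i.e. XSIZE*YSIZE bytes, while the pointers are int / unsigned int arrays and the kernel receives size = XSIZE*YSIZE. If First_Initialize_Kernel (whose source is not included here) touches size elements per array, the allocations are a factor of sizeof(int) too small; element-sized allocations would look like the sketch below. This is an observation about the auto-generated harness, not a change to it.

// Scale the byte count by the element size so size = XSIZE*YSIZE indices stay in bounds.
hipMalloc(&randoms,  sizeof(unsigned int) * XSIZE * YSIZE);
hipMalloc(&bestSeen, sizeof(int) * XSIZE * YSIZE);
hipMalloc(&origin,   sizeof(int) * XSIZE * YSIZE);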
65beaeb4ca229e7924e0d0d6683f7be87abd2616.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "First_Initialize_Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int size = XSIZE*YSIZE; unsigned int *randoms = NULL; cudaMalloc(&randoms, XSIZE*YSIZE); int *bestSeen = NULL; cudaMalloc(&bestSeen, XSIZE*YSIZE); int *origin = NULL; cudaMalloc(&origin, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); First_Initialize_Kernel<<<gridBlock,threadBlock>>>(size,randoms,bestSeen,origin); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { First_Initialize_Kernel<<<gridBlock,threadBlock>>>(size,randoms,bestSeen,origin); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { First_Initialize_Kernel<<<gridBlock,threadBlock>>>(size,randoms,bestSeen,origin); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
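Kernel launches are asynchronous, so timing the 1000-launch loop above with steady_clock and no synchronization after it largely measures enqueue time rather than kernel execution time. If GPU time were the quantity of interest, event-based timing around the same loop would be one option; the sketch below is illustrative only.

cudaEvent_t evStart, evStop;
cudaEventCreate(&evStart);
cudaEventCreate(&evStop);
cudaEventRecord(evStart);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
  First_Initialize_Kernel<<<gridBlock, threadBlock>>>(size, randoms, bestSeen, origin);
}
cudaEventRecord(evStop);
cudaEventSynchronize(evStop);                 // wait for all queued launches to finish
float ms = 0.0f;
cudaEventElapsedTime(&ms, evStart, evStop);   // elapsed GPU time in milliseconds
cudaEventDestroy(evStart);
cudaEventDestroy(evStop);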
8a8dffc3edf24e99f383e413e2e36d8899bd60a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2021-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cassert> #define checkCudaErrors(Code) assert((Code) == hipSuccess) #define checkCudaLaunch(...) checkCudaErrors((__VA_ARGS__, hipPeekAtLastError())) static constexpr int NumThreads = 32; static constexpr int NumBlocks = 2; __global__ void vectorAdd(int *v) { int tx = threadIdx.x + blockDim.x * blockIdx.x; v[tx] += tx; } int main() { int *d_vec = nullptr; checkCudaErrors(hipMalloc((void**)&d_vec, sizeof(int) * NumBlocks * NumThreads)); // Size is missing `* sizeof(int)` checkCudaErrors(hipMemset(d_vec, 0, NumBlocks * NumThreads)); hipLaunchKernelGGL(( checkCudaLaunch(vectorAdd), dim3(NumBlocks), dim3(NumThreads), 0, 0, d_vec)); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipFree(d_vec)); return 0; }
8a8dffc3edf24e99f383e413e2e36d8899bd60a3.cu
/* Copyright (c) 2021-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cassert> #define checkCudaErrors(Code) assert((Code) == cudaSuccess) #define checkCudaLaunch(...) checkCudaErrors((__VA_ARGS__, cudaPeekAtLastError())) static constexpr int NumThreads = 32; static constexpr int NumBlocks = 2; __global__ void vectorAdd(int *v) { int tx = threadIdx.x + blockDim.x * blockIdx.x; v[tx] += tx; } int main() { int *d_vec = nullptr; checkCudaErrors(cudaMalloc((void**)&d_vec, sizeof(int) * NumBlocks * NumThreads)); // Size is missing `* sizeof(int)` checkCudaErrors(cudaMemset(d_vec, 0, NumBlocks * NumThreads)); checkCudaLaunch(vectorAdd<<<NumBlocks, NumThreads>>>(d_vec)); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaFree(d_vec)); return 0; }
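The in-file comment above flags the cudaMemset size as deliberately missing * sizeof(int) (the buffer holds NumBlocks*NumThreads ints but only that many bytes are zeroed), presumably so a memory-checking tool has something to find. If a fully zero-initialized buffer were the intent, the call would be:

checkCudaErrors(cudaMemset(d_vec, 0, sizeof(int) * NumBlocks * NumThreads));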
f1cee9a49ac3cf14e0ddaf35eea06516cb564fcc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudaCalcGR.hh" #include <cmath> __global__ void reduceigr(uint32_t *igr, const uint32_t rnum, const uint32_t MPnum) { const uint32_t thID = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t thNum = gridDim.x * blockDim.x; for (uint32_t j=thID;j<rnum;j+=thNum) { for (uint32_t i=1;i<MPnum;++i) { igr[j] += igr[j + i*rnum]; } } } __global__ void kigr2gr(uint32_t *igr, const uint32_t rnum, real *gr, const real Q) { const uint32_t thID = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t thNum = gridDim.x * blockDim.x; /* * igr[] / N / (r^2 \Delta r) / \rho */ const real onethrid = 1.0 / 3.0; for (uint32_t j=thID+1;j<rnum;j+=thNum) { const real rinv2 = 1.0 / (4*j*j + onethrid); gr[j] = static_cast<real>(igr[j]) * Q * rinv2; } if (threadIdx.x == 0) { gr[0] = 0; } } void cudaCalcGR::makePairInfo(const cudaParticleMD &P) { std::vector<uint32_t> lbindex; lbindex.resize(P.totalNumBlock + 1); hipMemcpy(&(lbindex[0]), P.bindex, sizeof(uint32_t)*(P.totalNumBlock + 1), hipMemcpyDeviceToHost); std::cerr << "totalNumBlock: " << P.totalNumBlock << std::endl; if (withInfo) ErrorInfo("memcpy bindex failed"); long N = P.totalNumBlock * (P.totalNumBlock+1) / 2; pairInfo.reserve(N); pairInfo.resize(0); for (uint32_t I=0;I<P.totalNumBlock;++I) { if (lbindex[I]==UINT_MAX) continue; const uint32_t bstartI = lbindex[I]; int __I = I+1; while (lbindex[__I]==UINT_MAX) ++__I; const uint32_t bendI = lbindex[__I]; for (uint32_t J=I;J<P.totalNumBlock;++J) { if (lbindex[J]==UINT_MAX) continue; const uint32_t bstartJ = lbindex[J]; int __J = J+1; while (lbindex[__J]==UINT_MAX) ++__J; const uint32_t bendJ = lbindex[__J]; //std::cerr << bstartJ << ":" << bendJ << " " << std::flush; // candidates I-J block pair uint32_t i = bstartI; do { uint32_t j = bstartJ; do { uint4 p; p.x = i; p.y = ::min(i+64, bendI); p.z = j; p.w = ::min(j+64, bendJ); assert(p.y-p.x <= 64); assert(p.w-p.z <= 64); pairInfo.push_back(p); j+=64; } while (j<bendJ); i+=64; } while (i<bendI); } //std::cerr << std::endl; } std::cerr << "N(N+1)/2: " << N << std::endl << "pairInfo size: " << pairInfo.size() << std::endl; if (pInfo != NULL) { hipFree(pInfo); } if (withInfo) ErrorInfo("cudafree pInfo failed"); const size_t psize = sizeof(uint4)*bunchsize; std::cerr << "pInfo size: " << psize << std::endl; hipMalloc((void **)&pInfo, psize); if (withInfo) ErrorInfo("malloc pInfo failed"); } void cudaCalcGR::calcgr(const cudaParticleMD &part) { for (int i=0;i<9;++i) std::cerr << part.cell[i] << " "; std::cerr << std::endl; class calcGR_F4 P; P.cx = part.cell[6]; P.cy = part.cell[7]; P.cz = part.cell[8]; //P.typeID = typeID_s; P.rstepinv = rstepinv; P.rnum = rnum; dim3 _mpnum, _thnum; _mpnum.x = MPnum ; _mpnum.y = 1; _mpnum.z = 1; _thnum.x = THnum2D; _thnum.y = THnum2D; _thnum.z = 1; for (uint32_t i=0;i<pairInfo.size();i+=bunchsize) { std::cerr << i << "\t" << std::flush; const uint32_t psize = ::min(bunchsize, (pairInfo.size() - i)); hipMemcpy(pInfo, &(pairInfo[i]), sizeof(uint4)*psize, hipMemcpyHostToDevice); if (withInfo) ErrorInfo("makePairInfo() failed"); hipLaunchKernelGGL(( calcF_IJpairWithBlock5_F4), dim3(_mpnum), dim3(_thnum), sizeof(uint2)*4096, 0, P, part.r_s, pInfo, psize, igr, rnum ); if (withInfo) ErrorInfo("calcgr() failed"); } std::cerr << "done" << std::endl; } void cudaCalcGR::igr2gr(const cudaParticleMD &part) { hipLaunchKernelGGL(( reduceigr), dim3(MPnum), dim3(THnum1D), 0, 0, igr, rnum, MPnum); if (withInfo) ErrorInfo("reduceigr failed"); const 
real rhoinv = (part.cell[1]-part.cell[0]) * (part.cell[3]-part.cell[2]) * (part.cell[5]-part.cell[4]) / part.N; /* * Volume of spherical shell in the range (r-\Delta r/2, r+\Delta r/2) is * 4\pi / 3 (r+\Delta r/2)^3 - 4\pi / 3 (r-\Delta r/2)^3 * = 4pi / 3 (3r^2 \Delta r + 2(\Delta r/2)^3) * 4\pi r^2 \Delta r + 1/3 \pi \Delta r^3, * 1/V = 1 / (\pi \Delta r^3(4 i^2 + 1/3)) */ const real Vinv = M_1_PI * rstepinv * rstepinv * rstepinv; const real Q = 2.0 / part.N * Vinv * rhoinv; hipLaunchKernelGGL(( kigr2gr), dim3(MPnum), dim3(THnum1D), 0, 0, igr, rnum, gr, Q); if (withInfo) ErrorInfo("kigr2gr failed"); } void cudaCalcGR::getGr(std::ostream &o) { size_t sizeN = sizeof(real) * rnum; pthread_mutex_lock(&mutTMP); hipMemcpy(&(TMP[0]), gr, sizeN, hipMemcpyDeviceToHost); for (uint32_t i=0;i<rnum;++i) { const real r = i / rstepinv; o << r << " " << TMP[i] << std::endl; } pthread_mutex_unlock(&mutTMP); if (withInfo) ErrorInfo("do getGr"); }
f1cee9a49ac3cf14e0ddaf35eea06516cb564fcc.cu
#include "cudaCalcGR.hh" #include <cmath> __global__ void reduceigr(uint32_t *igr, const uint32_t rnum, const uint32_t MPnum) { const uint32_t thID = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t thNum = gridDim.x * blockDim.x; for (uint32_t j=thID;j<rnum;j+=thNum) { for (uint32_t i=1;i<MPnum;++i) { igr[j] += igr[j + i*rnum]; } } } __global__ void kigr2gr(uint32_t *igr, const uint32_t rnum, real *gr, const real Q) { const uint32_t thID = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t thNum = gridDim.x * blockDim.x; /* * igr[] / N / (r^2 \Delta r) / \rho */ const real onethrid = 1.0 / 3.0; for (uint32_t j=thID+1;j<rnum;j+=thNum) { const real rinv2 = 1.0 / (4*j*j + onethrid); gr[j] = static_cast<real>(igr[j]) * Q * rinv2; } if (threadIdx.x == 0) { gr[0] = 0; } } void cudaCalcGR::makePairInfo(const cudaParticleMD &P) { std::vector<uint32_t> lbindex; lbindex.resize(P.totalNumBlock + 1); cudaMemcpy(&(lbindex[0]), P.bindex, sizeof(uint32_t)*(P.totalNumBlock + 1), cudaMemcpyDeviceToHost); std::cerr << "totalNumBlock: " << P.totalNumBlock << std::endl; if (withInfo) ErrorInfo("memcpy bindex failed"); long N = P.totalNumBlock * (P.totalNumBlock+1) / 2; pairInfo.reserve(N); pairInfo.resize(0); for (uint32_t I=0;I<P.totalNumBlock;++I) { if (lbindex[I]==UINT_MAX) continue; const uint32_t bstartI = lbindex[I]; int __I = I+1; while (lbindex[__I]==UINT_MAX) ++__I; const uint32_t bendI = lbindex[__I]; for (uint32_t J=I;J<P.totalNumBlock;++J) { if (lbindex[J]==UINT_MAX) continue; const uint32_t bstartJ = lbindex[J]; int __J = J+1; while (lbindex[__J]==UINT_MAX) ++__J; const uint32_t bendJ = lbindex[__J]; //std::cerr << bstartJ << ":" << bendJ << " " << std::flush; // candidates I-J block pair uint32_t i = bstartI; do { uint32_t j = bstartJ; do { uint4 p; p.x = i; p.y = std::min(i+64, bendI); p.z = j; p.w = std::min(j+64, bendJ); assert(p.y-p.x <= 64); assert(p.w-p.z <= 64); pairInfo.push_back(p); j+=64; } while (j<bendJ); i+=64; } while (i<bendI); } //std::cerr << std::endl; } std::cerr << "N(N+1)/2: " << N << std::endl << "pairInfo size: " << pairInfo.size() << std::endl; if (pInfo != NULL) { cudaFree(pInfo); } if (withInfo) ErrorInfo("cudafree pInfo failed"); const size_t psize = sizeof(uint4)*bunchsize; std::cerr << "pInfo size: " << psize << std::endl; cudaMalloc((void **)&pInfo, psize); if (withInfo) ErrorInfo("malloc pInfo failed"); } void cudaCalcGR::calcgr(const cudaParticleMD &part) { for (int i=0;i<9;++i) std::cerr << part.cell[i] << " "; std::cerr << std::endl; class calcGR_F4 P; P.cx = part.cell[6]; P.cy = part.cell[7]; P.cz = part.cell[8]; //P.typeID = typeID_s; P.rstepinv = rstepinv; P.rnum = rnum; dim3 _mpnum, _thnum; _mpnum.x = MPnum ; _mpnum.y = 1; _mpnum.z = 1; _thnum.x = THnum2D; _thnum.y = THnum2D; _thnum.z = 1; for (uint32_t i=0;i<pairInfo.size();i+=bunchsize) { std::cerr << i << "\t" << std::flush; const uint32_t psize = std::min(bunchsize, (pairInfo.size() - i)); cudaMemcpy(pInfo, &(pairInfo[i]), sizeof(uint4)*psize, cudaMemcpyHostToDevice); if (withInfo) ErrorInfo("makePairInfo() failed"); calcF_IJpairWithBlock5_F4<<<_mpnum, _thnum, sizeof(uint2)*4096>>>(P, part.r_s, pInfo, psize, igr, rnum ); if (withInfo) ErrorInfo("calcgr() failed"); } std::cerr << "done" << std::endl; } void cudaCalcGR::igr2gr(const cudaParticleMD &part) { reduceigr<<<MPnum, THnum1D>>>(igr, rnum, MPnum); if (withInfo) ErrorInfo("reduceigr failed"); const real rhoinv = (part.cell[1]-part.cell[0]) * (part.cell[3]-part.cell[2]) * (part.cell[5]-part.cell[4]) / part.N; /* * Volume of spherical shell in 
the range (r-\Delta r/2, r+\Delta r/2) is * 4\pi / 3 (r+\Delta r/2)^3 - 4\pi / 3 (r-\Delta r/2)^3 * = 4pi / 3 (3r^2 \Delta r + 2(\Delta r/2)^3) * 4\pi r^2 \Delta r + 1/3 \pi \Delta r^3, * 1/V = 1 / (\pi \Delta r^3(4 i^2 + 1/3)) */ const real Vinv = M_1_PI * rstepinv * rstepinv * rstepinv; const real Q = 2.0 / part.N * Vinv * rhoinv; kigr2gr<<<MPnum, THnum1D>>>(igr, rnum, gr, Q); if (withInfo) ErrorInfo("kigr2gr failed"); } void cudaCalcGR::getGr(std::ostream &o) { size_t sizeN = sizeof(real) * rnum; pthread_mutex_lock(&mutTMP); cudaMemcpy(&(TMP[0]), gr, sizeN, cudaMemcpyDeviceToHost); for (uint32_t i=0;i<rnum;++i) { const real r = i / rstepinv; o << r << " " << TMP[i] << std::endl; } pthread_mutex_unlock(&mutTMP); if (withInfo) ErrorInfo("do getGr"); }
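The kernel comment above derives 1/V = 1/(pi * dr^3 * (4 j^2 + 1/3)) for the spherical shell at bin j, and kigr2gr multiplies the reduced counts by Q = 2/N * 1/(pi dr^3) * 1/rho. The host-side sketch below restates that normalization as a CPU cross-check of reduceigr + kigr2gr; the function name, the std::vector interface, and the parameter names npart/rho/rstep are assumptions for illustration only, not part of cudaCalcGR.

#include <cmath>
#include <cstdint>
#include <vector>

// CPU cross-check of the reduceigr + kigr2gr normalization: hist[j] holds the
// pair counts for shell j after the per-multiprocessor copies have been summed,
// npart is the particle count, rho the number density, rstep the bin width.
std::vector<double> igr_to_gr_cpu(const std::vector<std::uint32_t> &hist,
                                  double npart, double rho, double rstep)
{
    const double pi = std::acos(-1.0);
    std::vector<double> gr(hist.size(), 0.0);
    for (std::size_t j = 1; j < hist.size(); ++j) {
        // V = 4*pi/3 * ((r + dr/2)^3 - (r - dr/2)^3) with r = j*dr
        //   = pi * dr^3 * (4*j^2 + 1/3)
        const double V = pi * rstep * rstep * rstep * (4.0 * j * j + 1.0 / 3.0);
        // the leading 2/N mirrors the Q prefactor built in igr2gr above
        gr[j] = 2.0 * hist[j] / (npart * rho * V);
    }
    return gr;
}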
b421eb93a9d3b85bd6dfc067ac4c83d4f9d0c08b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * * Matrix Multiplication - CUDA for GPUs * * CS3210 * **/ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <assert.h> int size; typedef struct { float ** element; } matrix; long long wall_clock_time() { #ifdef __linux__ struct timespec tp; clock_gettime(CLOCK_REALTIME, &tp); return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000ll); #else struct timeval tv; gettimeofday(&tv, NULL); return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000ll); #endif } /** * Allocates memory for a matrix of size SIZE * The memory is allocated row-major order, i.e. * elements from the same row are allocated at contiguous * memory addresses. **/ void allocate_matrix(matrix* m) { int i; hipError_t rc; // allocate array for all the rows rc = hipMallocManaged((void**)&(m->element), sizeof(float*) * size); if (rc != hipSuccess) { fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(rc)); exit(1); } // allocate an array for each row of the matrix for (i = 0; i < size; i++) { rc = hipMallocManaged((void**)&(m->element[i]), sizeof(float) * size); if (rc != hipSuccess) { fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(rc)); exit(1); } } } /** * Free the memory allocated for a matrix. **/ void free_matrix(matrix* m) { int i; for (i = 0; i < size; i++) hipFree(m->element[i]); hipFree(m->element); } /** * Initializes the elements of the matrix with * random values between 0 and 9 **/ void init_matrix(matrix m) { int i, j; for (i = 0; i < size; i++) for (j = 0; j < size; j++) { m.element[i][j] = rand() % 10; } } /** * Initializes the elements of the matrix with * element 0. **/ void init_matrix_zero(matrix m) { int i, j; for (i = 0; i < size; i++) for (j = 0; j < size; j++) { m.element[i][j] = 0.0; } } /** * Multiplies matrix @a with matrix @b storing * the result in matrix @result * * The multiplication algorithm is the O(n^3) * algorithm */ void mm(matrix a, matrix b, matrix result) { int i, j, k; // Do the multiplication for (i = 0; i < size; i++) for (j = 0; j < size; j++) for(k = 0; k < size; k++) result.element[i][j] += a.element[i][k] * b.element[k][j]; } /** * Each kernel computes the result element (i,j). 
*/ __global__ void mm_kernel(matrix a, matrix b, matrix result, int size) { int i = blockIdx.x; int j = blockIdx.y; int k = threadIdx.x; if (i >= size || j >= size) return; result.element[i][k] += a.element[i][j] * b.element[j][k]; } void print_matrix(matrix m) { int i, j; for (i = 0; i < size; i++) { printf("row %4d: ", i); for (j = 0; j < size; j++) printf("%6.2f ", m.element[i][j]); printf("\n"); } } void work() { matrix a, b, result1, result2; long long before, after; int correct, i, j; hipError_t rc; // Allocate memory for matrices allocate_matrix(&a); allocate_matrix(&b); allocate_matrix(&result1); allocate_matrix(&result2); // Initialize matrix elements init_matrix(a); init_matrix(b); // Perform sequential matrix multiplication before = wall_clock_time(); mm(a, b, result1); after = wall_clock_time(); fprintf(stderr, "Matrix multiplication on CPU took %1.2f seconds\n", ((float)(after - before))/1000000000); // Perform CUDA matrix multiplication dim3 block(size); // a block of size CUDA threads dim3 grid(size, size); // a grid of size x size CUDA thread blocks before = wall_clock_time(); hipLaunchKernelGGL(( mm_kernel), dim3(grid), dim3(block), 0, 0, a, b, result2, size); hipDeviceSynchronize(); after = wall_clock_time(); fprintf(stderr, "Matrix multiplication on GPU took %1.2f seconds\n", ((float)(after - before))/1000000000); // was there any error? rc = hipGetLastError(); if (rc != hipSuccess) printf("Last CUDA error %s\n", hipGetErrorString(rc)); // Compare the results correct = 1; for (i = 0; correct && i < size; i++) for (j = 0; j < size; j++) if (result1.element[i][j] != result2.element[i][j]) { correct = 0; break; } if (correct) printf("The result matrices are identical!\n"); else printf("Difference in result matrices at element (%d, %d)!\n", i, j); free_matrix(&a); free_matrix(&b); free_matrix(&result1); free_matrix(&result2); } int main(int argc, char ** argv) { srand(0); printf("Usage: %s <size>\n", argv[0]); if (argc >= 2) size = atoi(argv[1]); else size = 1024; fprintf(stderr,"Sequential matrix multiplication of size %d\n", size); // Multiply the matrices work(); return 0; }
b421eb93a9d3b85bd6dfc067ac4c83d4f9d0c08b.cu
/** * * Matrix Multiplication - CUDA for GPUs * * CS3210 * **/ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <assert.h> int size; typedef struct { float ** element; } matrix; long long wall_clock_time() { #ifdef __linux__ struct timespec tp; clock_gettime(CLOCK_REALTIME, &tp); return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000ll); #else struct timeval tv; gettimeofday(&tv, NULL); return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000ll); #endif } /** * Allocates memory for a matrix of size SIZE * The memory is allocated row-major order, i.e. * elements from the same row are allocated at contiguous * memory addresses. **/ void allocate_matrix(matrix* m) { int i; cudaError_t rc; // allocate array for all the rows rc = cudaMallocManaged((void**)&(m->element), sizeof(float*) * size); if (rc != cudaSuccess) { fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(rc)); exit(1); } // allocate an array for each row of the matrix for (i = 0; i < size; i++) { rc = cudaMallocManaged((void**)&(m->element[i]), sizeof(float) * size); if (rc != cudaSuccess) { fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(rc)); exit(1); } } } /** * Free the memory allocated for a matrix. **/ void free_matrix(matrix* m) { int i; for (i = 0; i < size; i++) cudaFree(m->element[i]); cudaFree(m->element); } /** * Initializes the elements of the matrix with * random values between 0 and 9 **/ void init_matrix(matrix m) { int i, j; for (i = 0; i < size; i++) for (j = 0; j < size; j++) { m.element[i][j] = rand() % 10; } } /** * Initializes the elements of the matrix with * element 0. **/ void init_matrix_zero(matrix m) { int i, j; for (i = 0; i < size; i++) for (j = 0; j < size; j++) { m.element[i][j] = 0.0; } } /** * Multiplies matrix @a with matrix @b storing * the result in matrix @result * * The multiplication algorithm is the O(n^3) * algorithm */ void mm(matrix a, matrix b, matrix result) { int i, j, k; // Do the multiplication for (i = 0; i < size; i++) for (j = 0; j < size; j++) for(k = 0; k < size; k++) result.element[i][j] += a.element[i][k] * b.element[k][j]; } /** * Each kernel computes the result element (i,j). */ __global__ void mm_kernel(matrix a, matrix b, matrix result, int size) { int i = blockIdx.x; int j = blockIdx.y; int k = threadIdx.x; if (i >= size || j >= size) return; result.element[i][k] += a.element[i][j] * b.element[j][k]; } void print_matrix(matrix m) { int i, j; for (i = 0; i < size; i++) { printf("row %4d: ", i); for (j = 0; j < size; j++) printf("%6.2f ", m.element[i][j]); printf("\n"); } } void work() { matrix a, b, result1, result2; long long before, after; int correct, i, j; cudaError_t rc; // Allocate memory for matrices allocate_matrix(&a); allocate_matrix(&b); allocate_matrix(&result1); allocate_matrix(&result2); // Initialize matrix elements init_matrix(a); init_matrix(b); // Perform sequential matrix multiplication before = wall_clock_time(); mm(a, b, result1); after = wall_clock_time(); fprintf(stderr, "Matrix multiplication on CPU took %1.2f seconds\n", ((float)(after - before))/1000000000); // Perform CUDA matrix multiplication dim3 block(size); // a block of size CUDA threads dim3 grid(size, size); // a grid of size x size CUDA thread blocks before = wall_clock_time(); mm_kernel<<<grid, block>>>(a, b, result2, size); cudaDeviceSynchronize(); after = wall_clock_time(); fprintf(stderr, "Matrix multiplication on GPU took %1.2f seconds\n", ((float)(after - before))/1000000000); // was there any error? 
rc = cudaGetLastError(); if (rc != cudaSuccess) printf("Last CUDA error %s\n", cudaGetErrorString(rc)); // Compare the results correct = 1; for (i = 0; correct && i < size; i++) for (j = 0; j < size; j++) if (result1.element[i][j] != result2.element[i][j]) { correct = 0; break; } if (correct) printf("The result matrices are identical!\n"); else printf("Difference in result matrices at element (%d, %d)!\n", i, j); free_matrix(&a); free_matrix(&b); free_matrix(&result1); free_matrix(&result2); } int main(int argc, char ** argv) { srand(0); printf("Usage: %s <size>\n", argv[0]); if (argc >= 2) size = atoi(argv[1]); else size = 1024; fprintf(stderr,"Sequential matrix multiplication of size %d\n", size); // Multiply the matrices work(); return 0; }
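One observation on the matrix-multiplication pair above: every block with the same (i, k) but a different j performs an unsynchronized read-modify-write on result.element[i][k], and result2 is never passed through init_matrix_zero before the launch, so the GPU result is both racy and accumulated onto uninitialized memory. The kernel below is a minimal race-free alternative that keeps the row-pointer matrix struct from the files above; the 16x16 launch shape in the trailing comment is an assumption, not the original configuration.

// One thread per output element: each thread owns result.element[i][j] and
// accumulates the full dot product in a register, so no atomics are needed
// and the output does not have to be zeroed beforehand.
__global__ void mm_kernel_one_per_element(matrix a, matrix b, matrix result, int size)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;   // row
    int j = blockIdx.x * blockDim.x + threadIdx.x;   // column
    if (i >= size || j >= size)
        return;

    float sum = 0.0f;
    for (int k = 0; k < size; k++)
        sum += a.element[i][k] * b.element[k][j];
    result.element[i][j] = sum;
}

// Possible replacement for the original grid/block setup in work():
//   dim3 block(16, 16);
//   dim3 grid((size + block.x - 1) / block.x, (size + block.y - 1) / block.y);
//   mm_kernel_one_per_element<<<grid, block>>>(a, b, result2, size);
//   cudaDeviceSynchronize();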
c68d8add60c3665ac3d15e70f81017df33882608.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <math.h> #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } // this kernel computes the vector sum c = a + b // each thread performs one pair-wise addition __global__ void vector_add(const float *a, const float *b, float *c, const size_t n) { // compute the global element index this thread should process unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; // avoid accessing out of bounds elements if(i < n) { // sum elements c[i] = a[i] + b[i]; } } int main(void) { // create arrays of 1M elements int num_elements = 0 ; printf("Enter number of elements to add"); scanf("%d", &num_elements); // compute the size of the arrays in bytes const int num_bytes = num_elements * sizeof(float); // points to host & device arrays float *device_array_a = 0; float *device_array_b = 0; float *device_array_c = 0; float *host_array_a = 0; float *host_array_b = 0; float *host_array_c = 0; // malloc the host arrays host_array_a = (float*)malloc(num_bytes); host_array_b = (float*)malloc(num_bytes); host_array_c = (float*)malloc(num_bytes); // hipMalloc the device arrays HANDLE_ERROR(hipMalloc((void**)&device_array_a, num_bytes)); HANDLE_ERROR(hipMalloc((void**)&device_array_b, num_bytes)); HANDLE_ERROR(hipMalloc((void**)&device_array_c, num_bytes)); // initialize host_array_a & host_array_b for(int i = 0; i < num_elements; ++i) { // make array a a linear ramp host_array_a[i] = (float)i; // make array b random host_array_b[i] = (float)rand() / RAND_MAX; } // copy arrays a & b to the device memory space HANDLE_ERROR(hipMemcpy(device_array_a, host_array_a, num_bytes, hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(device_array_b, host_array_b, num_bytes, hipMemcpyHostToDevice)); // launch the kernel hipLaunchKernelGGL(( vector_add) , dim3(ceil(num_elements/32.0)), dim3(32), 0, 0, device_array_a, device_array_b, device_array_c, num_elements); // copy the result back to the host memory space HANDLE_ERROR(hipMemcpy(host_array_c, device_array_c, num_bytes, hipMemcpyDeviceToHost)); for(int i = 0; i < num_elements; ++i) { printf("result %d: %1.1f + %7.1f = %7.1f\n", i, host_array_a[i], host_array_b[i], host_array_c[i]); } // deallocate memory free(host_array_a); free(host_array_b); free(host_array_c); HANDLE_ERROR(hipFree(device_array_a)); HANDLE_ERROR(hipFree(device_array_b)); HANDLE_ERROR(hipFree(device_array_c)); }
c68d8add60c3665ac3d15e70f81017df33882608.cu
#include <stdlib.h> #include <stdio.h> #include <math.h> #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } // this kernel computes the vector sum c = a + b // each thread performs one pair-wise addition __global__ void vector_add(const float *a, const float *b, float *c, const size_t n) { // compute the global element index this thread should process unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; // avoid accessing out of bounds elements if(i < n) { // sum elements c[i] = a[i] + b[i]; } } int main(void) { // create arrays of 1M elements int num_elements = 0 ; printf("Enter number of elements to add"); scanf("%d", &num_elements); // compute the size of the arrays in bytes const int num_bytes = num_elements * sizeof(float); // points to host & device arrays float *device_array_a = 0; float *device_array_b = 0; float *device_array_c = 0; float *host_array_a = 0; float *host_array_b = 0; float *host_array_c = 0; // malloc the host arrays host_array_a = (float*)malloc(num_bytes); host_array_b = (float*)malloc(num_bytes); host_array_c = (float*)malloc(num_bytes); // cudaMalloc the device arrays HANDLE_ERROR(cudaMalloc((void**)&device_array_a, num_bytes)); HANDLE_ERROR(cudaMalloc((void**)&device_array_b, num_bytes)); HANDLE_ERROR(cudaMalloc((void**)&device_array_c, num_bytes)); // initialize host_array_a & host_array_b for(int i = 0; i < num_elements; ++i) { // make array a a linear ramp host_array_a[i] = (float)i; // make array b random host_array_b[i] = (float)rand() / RAND_MAX; } // copy arrays a & b to the device memory space HANDLE_ERROR(cudaMemcpy(device_array_a, host_array_a, num_bytes, cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(device_array_b, host_array_b, num_bytes, cudaMemcpyHostToDevice)); // launch the kernel vector_add <<< ceil(num_elements/32.0), 32>>>(device_array_a, device_array_b, device_array_c, num_elements); // copy the result back to the host memory space HANDLE_ERROR(cudaMemcpy(host_array_c, device_array_c, num_bytes, cudaMemcpyDeviceToHost)); for(int i = 0; i < num_elements; ++i) { printf("result %d: %1.1f + %7.1f = %7.1f\n", i, host_array_a[i], host_array_b[i], host_array_c[i]); } // deallocate memory free(host_array_a); free(host_array_b); free(host_array_c); HANDLE_ERROR(cudaFree(device_array_a)); HANDLE_ERROR(cudaFree(device_array_b)); HANDLE_ERROR(cudaFree(device_array_c)); }
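The vector-add pair above wraps every cudaMalloc and cudaMemcpy in HANDLE_ERROR but never checks the kernel launch itself. The fragment below shows one way the same macro could cover the launch; it reuses the variables and macro from main() in the file above and is a sketch of a possible addition, not part of the original.

// launch the kernel, then surface both launch-configuration errors and
// errors raised while the kernel executed, through the existing macro
vector_add<<<(num_elements + 31) / 32, 32>>>(device_array_a, device_array_b,
                                             device_array_c, num_elements);
HANDLE_ERROR(cudaGetLastError());       // e.g. invalid launch configuration
HANDLE_ERROR(cudaDeviceSynchronize());  // errors from the kernel run itself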
db325d3e3652b513890337345e29d1b743571716.hip
// !!! This is a file automatically generated by hipify!!! // Eugene Shvarts // STA 250 | Fall 2013 //Accept-reject sampler for the truncated normal distribution (either end finite or infinite). //Attempts naive rejection sampling until a specified number of failures, then switches to //the method detailed in Robert (2009) (see the Lecture notes or the assignment). //In the unlikely event that the Robert method fails, will return NA after a (different) //specified number of failures. #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <math_constants.h> extern "C" { __global__ void rtruncnorm_kernel(float *vals, int n, //vals is the input and output float *mu, float *sigma, //these are the distribution parameters float *lo, float *hi, //these are the truncation parameters int rng_a, int rng_b, int rng_c,//these are the RNG seeds int maxnaive, //after maxnaive attempts, switch methods int maxtries) //after maxtries attempts, cancel with error { // Usual block/thread indexing... int myblock = blockIdx.x + blockIdx.y * gridDim.x; int blocksize = blockDim.x * blockDim.y * blockDim.z; int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; int idx = myblock * blocksize + subthread; if (idx < n) { // Setup the RNG: hiprandState_t rng; hiprand_init(rng_a+idx*rng_b, rng_c, 0, &rng); // Sample: // First try naive rejection method, until maxnaive failures. for (int i = 1; i < maxnaive; i = i+1) { vals[idx] = mu[idx] + sigma[idx] * hiprand_normal(&rng); if (vals[idx] > lo[idx] && vals[idx] <= hi[idx]) { return; } } // If we made it this far without a return statement, we've had maxnaive failures. // So, it's time to try the more sophisticated truncated sampling method cf. Robert. // First we decide between two-sided and one-sided truncation. Note that there must be // some truncation; otherwise it is impossible to reach this point. int which_side = 0; float offset = 0; if (isfinite(lo[idx])) { offset = (lo[idx]-mu[idx])/sigma[idx]; which_side = which_side + 1; } if (isfinite(hi[idx])) { offset = (hi[idx]-mu[idx])/sigma[idx]; which_side = which_side - 1; } //When which_side = 1, truncation is unbounded to the right. //When which_side = -1, unbounded to the left. //When which_side = 0, bounded on both sides. 
float a_opt = 0; //represents the optimal alpha for the exponential distribution float temp = 0; //from sampling the exponential distribution by inverse CDF float u = 0; //generated to compare against the inverse CDF if (which_side != 0) { //the single-truncated case a_opt = 0.5 * (which_side * offset + sqrtf(powf(offset,2) + 4)); for (int i = 1; i<maxtries; i = i+1) { temp = hiprand_uniform(&rng); vals[idx] = which_side * offset - logf(temp) / a_opt; //the exponential sample temp = expf(-0.5 * powf(vals[idx]-a_opt, 2)); //using overwriting to conserve space u = hiprand_uniform(&rng); if (u <= temp) { //we used a standardized normal this entire time vals[idx] = mu[idx] + which_side*sigma[idx]*vals[idx]; return; } } // end maxtries loop } // end single-truncated case else { //the both-truncated-sides case float mu_m = (lo[idx]-mu[idx])/sigma[idx]; float mu_p = (hi[idx]-mu[idx])/sigma[idx]; if (mu_p < 0) { a_opt = powf(mu_p,2); } else if (mu_m > 0) { a_opt = powf(mu_m,2); } for (int i = 1; i<maxtries; i = i+1) { vals[idx] = (mu_p-mu_m) * hiprand_uniform(&rng) + mu_m; //uniform on the appropriate interval temp = expf(0.5 * (a_opt - powf(vals[idx],2))); //choose the right branch of the function u = hiprand_uniform(&rng); if (u <= temp) { //we used a standardized normal this entire time vals[idx] = mu[idx] + sigma[idx]*vals[idx]; return; } } // end maxtries loop } // end both-truncated case // If the code reaches this point, maxtries has been exhausted with no luck. } // end if idx < n vals[idx] = CUDART_NAN_F; //so, return NA return; } // end rtruncnorm_kernel } // END extern "C"
db325d3e3652b513890337345e29d1b743571716.cu
// Eugene Shvarts // STA 250 | Fall 2013 //Accept-reject sampler for the truncated normal distribution (either end finite or infinite). //Attempts naive rejection sampling until a specified number of failures, then switches to //the method detailed in Robert (2009) (see the Lecture notes or the assignment). //In the unlikely event that the Robert method fails, will return NA after a (different) //specified number of failures. #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <curand_kernel.h> #include <math_constants.h> extern "C" { __global__ void rtruncnorm_kernel(float *vals, int n, //vals is the input and output float *mu, float *sigma, //these are the distribution parameters float *lo, float *hi, //these are the truncation parameters int rng_a, int rng_b, int rng_c,//these are the RNG seeds int maxnaive, //after maxnaive attempts, switch methods int maxtries) //after maxtries attempts, cancel with error { // Usual block/thread indexing... int myblock = blockIdx.x + blockIdx.y * gridDim.x; int blocksize = blockDim.x * blockDim.y * blockDim.z; int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; int idx = myblock * blocksize + subthread; if (idx < n) { // Setup the RNG: curandState rng; curand_init(rng_a+idx*rng_b, rng_c, 0, &rng); // Sample: // First try naive rejection method, until maxnaive failures. for (int i = 1; i < maxnaive; i = i+1) { vals[idx] = mu[idx] + sigma[idx] * curand_normal(&rng); if (vals[idx] > lo[idx] && vals[idx] <= hi[idx]) { return; } } // If we made it this far without a return statement, we've had maxnaive failures. // So, it's time to try the more sophisticated truncated sampling method cf. Robert. // First we decide between two-sided and one-sided truncation. Note that there must be // some truncation; otherwise it is impossible to reach this point. int which_side = 0; float offset = 0; if (isfinite(lo[idx])) { offset = (lo[idx]-mu[idx])/sigma[idx]; which_side = which_side + 1; } if (isfinite(hi[idx])) { offset = (hi[idx]-mu[idx])/sigma[idx]; which_side = which_side - 1; } //When which_side = 1, truncation is unbounded to the right. //When which_side = -1, unbounded to the left. //When which_side = 0, bounded on both sides. 
float a_opt = 0; //represents the optimal alpha for the exponential distribution float temp = 0; //from sampling the exponential distribution by inverse CDF float u = 0; //generated to compare against the inverse CDF if (which_side != 0) { //the single-truncated case a_opt = 0.5 * (which_side * offset + sqrtf(powf(offset,2) + 4)); for (int i = 1; i<maxtries; i = i+1) { temp = curand_uniform(&rng); vals[idx] = which_side * offset - logf(temp) / a_opt; //the exponential sample temp = expf(-0.5 * powf(vals[idx]-a_opt, 2)); //using overwriting to conserve space u = curand_uniform(&rng); if (u <= temp) { //we used a standardized normal this entire time vals[idx] = mu[idx] + which_side*sigma[idx]*vals[idx]; return; } } // end maxtries loop } // end single-truncated case else { //the both-truncated-sides case float mu_m = (lo[idx]-mu[idx])/sigma[idx]; float mu_p = (hi[idx]-mu[idx])/sigma[idx]; if (mu_p < 0) { a_opt = powf(mu_p,2); } else if (mu_m > 0) { a_opt = powf(mu_m,2); } for (int i = 1; i<maxtries; i = i+1) { vals[idx] = (mu_p-mu_m) * curand_uniform(&rng) + mu_m; //uniform on the appropriate interval temp = expf(0.5 * (a_opt - powf(vals[idx],2))); //choose the right branch of the function u = curand_uniform(&rng); if (u <= temp) { //we used a standardized normal this entire time vals[idx] = mu[idx] + sigma[idx]*vals[idx]; return; } } // end maxtries loop } // end both-truncated case // If the code reaches this point, maxtries has been exhausted with no luck. } // end if idx < n vals[idx] = CUDART_NAN_F; //so, return NA return; } // end rtruncnorm_kernel } // END extern "C"
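Two notes on the truncated-normal pair above. First, the closing vals[idx] = CUDART_NAN_F; sits after the brace that ends if (idx < n), so threads whose index is out of range write past the end of vals. Second, for readers of the one-sided branch, the sketch below restates Robert's lower-truncation sampler as a self-contained device helper: a shifted exponential proposal with optimal rate a_opt = (mu_minus + sqrt(mu_minus^2 + 4))/2, accepted with probability exp(-(z - a_opt)^2/2). The helper name and its return-NAN-on-failure convention are assumptions for illustration; the kernel itself handles both truncation sides and the rescaling by mu and sigma.

#include <curand_kernel.h>
#include <math_constants.h>

// Standard normal truncated to [mu_minus, +inf), following the same
// accept-reject scheme as the single-truncated branch of the kernel above.
// rng must already be initialized with curand_init by the caller.
__device__ float rtruncnorm_lower(curandState *rng, float mu_minus, int maxtries)
{
    const float a_opt = 0.5f * (mu_minus + sqrtf(mu_minus * mu_minus + 4.0f));
    for (int i = 0; i < maxtries; ++i) {
        // exponential(a_opt) shifted to start at mu_minus, via inverse CDF
        const float z = mu_minus - logf(curand_uniform(rng)) / a_opt;
        const float accept = expf(-0.5f * (z - a_opt) * (z - a_opt));
        if (curand_uniform(rng) <= accept)
            return z;              // standardized draw; caller applies mu + sigma*z
    }
    return CUDART_NAN_F;           // give up after maxtries, as the kernel does
}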
376d55caab46a7fad3f3c3e55b02bf7bde538f56.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "helper_math.h" #include <thrust/device_vector.h> #include <vector> #include <iostream> template <typename scalar_t> __global__ void hv_cuda_forward_kernel( const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> points, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> xyz_labels, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> scale_labels, const torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits> obj_labels, torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> grid_obj, torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> grid_rot, torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> grid_scale, float3 corner, const float* __restrict__ res, const int* __restrict__ num_rots) { const int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < points.size(0)) { scalar_t objness = obj_labels[c]; float3 corr = make_float3( xyz_labels[c][0] * scale_labels[c][0], xyz_labels[c][1] * scale_labels[c][1], xyz_labels[c][2] * scale_labels[c][2] ); float3 point = make_float3(points[c][0], points[c][1], points[c][2]); const float rot_interval = 2 * 3.141592654f / (*num_rots); for (int i = 0; i < (*num_rots); i++) { float theta = i * rot_interval; float3 offset = make_float3(-cos(theta) * corr.x + sin(theta) * corr.z, -corr.y, -sin(theta) * corr.x - cos(theta) * corr.z); float3 center_grid = (point + offset - corner) / (*res); if (center_grid.x < 0 || center_grid.y < 0 || center_grid.z < 0 || center_grid.x >= grid_obj.size(0) - 1 || center_grid.y >= grid_obj.size(1) - 1 || center_grid.z >= grid_obj.size(2) - 1) { continue; } int3 center_grid_floor = make_int3(center_grid); int3 center_grid_ceil = center_grid_floor + 1; float3 residual = fracf(center_grid); float3 w0 = 1.f - residual; float3 w1 = residual; float lll = w0.x * w0.y * w0.z * objness; float llh = w0.x * w0.y * w1.z * objness; float lhl = w0.x * w1.y * w0.z * objness; float lhh = w0.x * w1.y * w1.z * objness; float hll = w1.x * w0.y * w0.z * objness; float hlh = w1.x * w0.y * w1.z * objness; float hhl = w1.x * w1.y * w0.z * objness; float hhh = w1.x * w1.y * w1.z * objness; atomicAdd(&grid_obj[center_grid_floor.x][center_grid_floor.y][center_grid_floor.z], lll); atomicAdd(&grid_obj[center_grid_floor.x][center_grid_floor.y][center_grid_ceil.z], llh); atomicAdd(&grid_obj[center_grid_floor.x][center_grid_ceil.y][center_grid_floor.z], lhl); atomicAdd(&grid_obj[center_grid_floor.x][center_grid_ceil.y][center_grid_ceil.z], lhh); atomicAdd(&grid_obj[center_grid_ceil.x][center_grid_floor.y][center_grid_floor.z], hll); atomicAdd(&grid_obj[center_grid_ceil.x][center_grid_floor.y][center_grid_ceil.z], hlh); atomicAdd(&grid_obj[center_grid_ceil.x][center_grid_ceil.y][center_grid_floor.z], hhl); atomicAdd(&grid_obj[center_grid_ceil.x][center_grid_ceil.y][center_grid_ceil.z], hhh); float rot_vec[2] = {cos(theta), sin(theta)}; for (int j = 0; j < 2; j++) { float rot = rot_vec[j]; atomicAdd(&grid_rot[center_grid_floor.x][center_grid_floor.y][center_grid_floor.z][j], lll * rot); atomicAdd(&grid_rot[center_grid_floor.x][center_grid_floor.y][center_grid_ceil.z][j], llh * rot); atomicAdd(&grid_rot[center_grid_floor.x][center_grid_ceil.y][center_grid_floor.z][j], lhl * rot); atomicAdd(&grid_rot[center_grid_floor.x][center_grid_ceil.y][center_grid_ceil.z][j], lhh * rot); 
atomicAdd(&grid_rot[center_grid_ceil.x][center_grid_floor.y][center_grid_floor.z][j], hll * rot); atomicAdd(&grid_rot[center_grid_ceil.x][center_grid_floor.y][center_grid_ceil.z][j], hlh * rot); atomicAdd(&grid_rot[center_grid_ceil.x][center_grid_ceil.y][center_grid_floor.z][j], hhl * rot); atomicAdd(&grid_rot[center_grid_ceil.x][center_grid_ceil.y][center_grid_ceil.z][j], hhh * rot); } for (int j = 0; j < 3; j++) { float scale = scale_labels[c][j]; atomicAdd(&grid_scale[center_grid_floor.x][center_grid_floor.y][center_grid_floor.z][j], lll * scale); atomicAdd(&grid_scale[center_grid_floor.x][center_grid_floor.y][center_grid_ceil.z][j], llh * scale); atomicAdd(&grid_scale[center_grid_floor.x][center_grid_ceil.y][center_grid_floor.z][j], lhl * scale); atomicAdd(&grid_scale[center_grid_floor.x][center_grid_ceil.y][center_grid_ceil.z][j], lhh * scale); atomicAdd(&grid_scale[center_grid_ceil.x][center_grid_floor.y][center_grid_floor.z][j], hll * scale); atomicAdd(&grid_scale[center_grid_ceil.x][center_grid_floor.y][center_grid_ceil.z][j], hlh * scale); atomicAdd(&grid_scale[center_grid_ceil.x][center_grid_ceil.y][center_grid_floor.z][j], hhl * scale); atomicAdd(&grid_scale[center_grid_ceil.x][center_grid_ceil.y][center_grid_ceil.z][j], hhh * scale); } } } } template <typename scalar_t> __global__ void hv_cuda_average_kernel( const torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> grid, torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> grid_rot, torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> grid_scale) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int z = blockIdx.z * blockDim.z + threadIdx.z; if (x >= grid.size(0) || y >= grid.size(1) || z >= grid.size(2)) return; float w = grid[x][y][z]; for (int j = 0; j < 2; j++) { grid_rot[x][y][z][j] /= w + 1e-7; } for (int j = 0; j < 3; j++) { grid_scale[x][y][z][j] /= w + 1e-7; } } std::vector<torch::Tensor> hv_cuda_forward( torch::Tensor points, torch::Tensor xyz_labels, torch::Tensor scale_labels, torch::Tensor obj_labels, torch::Tensor res, torch::Tensor num_rots) { auto corners = torch::stack({std::get<0>(torch::min(points, 0)), std::get<0>(torch::max(points, 0))}, 0); // 2 x 3 auto corner = corners[0]; // 3 auto diff = (corners[1] - corners[0]) / res; // 3 auto grid_obj = torch::zeros({diff[0].item().to<int>() + 1, diff[1].item().to<int>() + 1, diff[2].item().to<int>() + 1}, points.options()); auto grid_rot = torch::zeros({diff[0].item().to<int>() + 1, diff[1].item().to<int>() + 1, diff[2].item().to<int>() + 1, 2}, points.options()); auto grid_scale = torch::zeros({diff[0].item().to<int>() + 1, diff[1].item().to<int>() + 1, diff[2].item().to<int>() + 1, 3}, points.options()); // std::cout << grid.size(0) << ", " << grid.size(1) << ", " << grid.size(2) << std::endl; // std::cout << corner << std::endl; const int threads = 1024; const dim3 blocks((points.size(0) + threads - 1) / threads); AT_DISPATCH_FLOATING_TYPES(points.type(), "hv_forward_cuda", ([&] { hipLaunchKernelGGL(( hv_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, points.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), xyz_labels.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), scale_labels.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), obj_labels.packed_accessor32<scalar_t, 1, torch::RestrictPtrTraits>(), grid_obj.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(), 
grid_rot.packed_accessor32<scalar_t, 4, torch::RestrictPtrTraits>(), grid_scale.packed_accessor32<scalar_t, 4, torch::RestrictPtrTraits>(), make_float3(corner[0].item().to<float>(), corner[1].item().to<float>(), corner[2].item().to<float>()), res.data<float>(), num_rots.data<int>() ); })); AT_DISPATCH_FLOATING_TYPES(points.type(), "hv_average_cuda", ([&] { hipLaunchKernelGGL(( hv_cuda_average_kernel<scalar_t>), dim3(dim3((grid_obj.size(0) + 7) / 8, (grid_obj.size(1) + 7) / 8, (grid_obj.size(2) + 7) / 8)), dim3(dim3(8, 8, 8)), 0, 0, grid_obj.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(), grid_rot.packed_accessor32<scalar_t, 4, torch::RestrictPtrTraits>(), grid_scale.packed_accessor32<scalar_t, 4, torch::RestrictPtrTraits>() ); })); return {grid_obj, grid_rot, grid_scale}; } template <typename scalar_t> __global__ void hv_cuda_backward_kernel( const torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> grad_grid, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> points, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> xyz_labels, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> scale_labels, const torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits> obj_labels, // torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> d_points, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> d_xyz_labels, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> d_scale_labels, torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits> d_obj_labels, float3 corner, const float* __restrict__ res, const int* __restrict__ num_rots) { const int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < points.size(0)) { scalar_t objness = obj_labels[c]; float3 corr = make_float3( xyz_labels[c][0] * scale_labels[c][0], xyz_labels[c][1] * scale_labels[c][1], xyz_labels[c][2] * scale_labels[c][2] ); float3 point = make_float3(points[c][0], points[c][1], points[c][2]); float rot_interval = 2 * 3.141592654f / (*num_rots); for (int i = 0; i < (*num_rots); i++) { float theta = i * rot_interval; float3 offset = make_float3(-cos(theta) * corr.x + sin(theta) * corr.z, -corr.y, -sin(theta) * corr.x - cos(theta) * corr.z); float3 center_grid = (point + offset - corner) / (*res); if (center_grid.x < 0 || center_grid.y < 0 || center_grid.z < 0 || center_grid.x >= grad_grid.size(0) - 1 || center_grid.y >= grad_grid.size(1) - 1 || center_grid.z >= grad_grid.size(2) - 1) { continue; } int3 center_grid_floor = make_int3(center_grid); int3 center_grid_ceil = center_grid_floor + 1; float3 residual = fracf(center_grid); float3 w0 = 1.f - residual; float3 w1 = residual; d_obj_labels[c] += grad_grid[center_grid_floor.x][center_grid_floor.y][center_grid_floor.z] * w0.x * w0.y * w0.z; d_obj_labels[c] += grad_grid[center_grid_floor.x][center_grid_floor.y][center_grid_ceil.z] * w0.x * w0.y * w1.z; d_obj_labels[c] += grad_grid[center_grid_floor.x][center_grid_ceil.y][center_grid_floor.z] * w0.x * w1.y * w0.z; d_obj_labels[c] += grad_grid[center_grid_floor.x][center_grid_ceil.y][center_grid_ceil.z] * w0.x * w1.y * w1.z; d_obj_labels[c] += grad_grid[center_grid_ceil.x][center_grid_floor.y][center_grid_floor.z] * w1.x * w0.y * w0.z; d_obj_labels[c] += grad_grid[center_grid_ceil.x][center_grid_floor.y][center_grid_ceil.z] * w1.x * w0.y * w1.z; d_obj_labels[c] += grad_grid[center_grid_ceil.x][center_grid_ceil.y][center_grid_floor.z] * w1.x * w1.y * w0.z; d_obj_labels[c] += 
grad_grid[center_grid_ceil.x][center_grid_ceil.y][center_grid_ceil.z] * w1.x * w1.y * w1.z; float3 dgrid_dcenter = make_float3( - grad_grid[center_grid_floor.x][center_grid_floor.y][center_grid_floor.z] * w0.y * w0.z - grad_grid[center_grid_floor.x][center_grid_floor.y][center_grid_ceil.z] * w0.y * w1.z - grad_grid[center_grid_floor.x][center_grid_ceil.y][center_grid_floor.z] * w1.y * w0.z - grad_grid[center_grid_floor.x][center_grid_ceil.y][center_grid_ceil.z] * w1.y * w1.z + grad_grid[center_grid_ceil.x][center_grid_floor.y][center_grid_floor.z] * w0.y * w0.z + grad_grid[center_grid_ceil.x][center_grid_floor.y][center_grid_ceil.z] * w0.y * w1.z + grad_grid[center_grid_ceil.x][center_grid_ceil.y][center_grid_floor.z] * w1.y * w0.z + grad_grid[center_grid_ceil.x][center_grid_ceil.y][center_grid_ceil.z] * w1.y * w1.z, - grad_grid[center_grid_floor.x][center_grid_floor.y][center_grid_floor.z] * w0.x * w0.z - grad_grid[center_grid_floor.x][center_grid_floor.y][center_grid_ceil.z] * w0.x * w1.z + grad_grid[center_grid_floor.x][center_grid_ceil.y][center_grid_floor.z] * w0.x * w0.z + grad_grid[center_grid_floor.x][center_grid_ceil.y][center_grid_ceil.z] * w0.x * w1.z - grad_grid[center_grid_ceil.x][center_grid_floor.y][center_grid_floor.z] * w1.x * w0.z - grad_grid[center_grid_ceil.x][center_grid_floor.y][center_grid_ceil.z] * w1.x * w1.z + grad_grid[center_grid_ceil.x][center_grid_ceil.y][center_grid_floor.z] * w1.x * w0.z + grad_grid[center_grid_ceil.x][center_grid_ceil.y][center_grid_ceil.z] * w1.x * w1.z, - grad_grid[center_grid_floor.x][center_grid_floor.y][center_grid_floor.z] * w0.x * w0.y + grad_grid[center_grid_floor.x][center_grid_floor.y][center_grid_ceil.z] * w0.x * w0.y - grad_grid[center_grid_floor.x][center_grid_ceil.y][center_grid_floor.z] * w0.x * w1.y + grad_grid[center_grid_floor.x][center_grid_ceil.y][center_grid_ceil.z] * w0.x * w1.y - grad_grid[center_grid_ceil.x][center_grid_floor.y][center_grid_floor.z] * w1.x * w0.y + grad_grid[center_grid_ceil.x][center_grid_floor.y][center_grid_ceil.z] * w1.x * w0.y - grad_grid[center_grid_ceil.x][center_grid_ceil.y][center_grid_floor.z] * w1.x * w1.y + grad_grid[center_grid_ceil.x][center_grid_ceil.y][center_grid_ceil.z] * w1.x * w1.y) * objness; // d_points[c][0] += dgrid_dcenter.x; // d_points[c][1] += dgrid_dcenter.y; // d_points[c][2] += dgrid_dcenter.z; float3 d_corr = make_float3(- cos(theta) * dgrid_dcenter.x - sin(theta) * dgrid_dcenter.z, -dgrid_dcenter.y, sin(theta) * dgrid_dcenter.x - cos(theta) * dgrid_dcenter.z); d_xyz_labels[c][0] += d_corr.x * scale_labels[c][0]; d_xyz_labels[c][1] += d_corr.y * scale_labels[c][1]; d_xyz_labels[c][2] += d_corr.z * scale_labels[c][2]; d_scale_labels[c][0] += d_corr.x * xyz_labels[c][0]; d_scale_labels[c][1] += d_corr.y * xyz_labels[c][1]; d_scale_labels[c][2] += d_corr.z * xyz_labels[c][2]; } } } std::vector<torch::Tensor> hv_cuda_backward( torch::Tensor grad_grid, torch::Tensor points, torch::Tensor xyz_labels, torch::Tensor scale_labels, torch::Tensor obj_labels, torch::Tensor res, torch::Tensor num_rots) { auto corners = torch::stack({std::get<0>(torch::min(points, 0)), std::get<0>(torch::max(points, 0))}, 0); // 2 x 3 auto corner = corners[0]; // 3 auto diff = (corners[1] - corners[0]) / res; // 3 // auto d_points = torch::zeros_like(points); auto d_xyz_labels = torch::zeros_like(xyz_labels); auto d_scale_labels = torch::zeros_like(scale_labels); auto d_obj_labels = torch::zeros_like(obj_labels); const int threads = 512; const dim3 blocks((points.size(0) + threads - 1) / threads); 
AT_DISPATCH_FLOATING_TYPES(points.type(), "hv_backward_cuda", ([&] { hipLaunchKernelGGL(( hv_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, grad_grid.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(), points.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), xyz_labels.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), scale_labels.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), obj_labels.packed_accessor32<scalar_t, 1, torch::RestrictPtrTraits>(), // d_points.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), d_xyz_labels.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), d_scale_labels.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), d_obj_labels.packed_accessor32<scalar_t, 1, torch::RestrictPtrTraits>(), make_float3(corner[0].item().to<float>(), corner[1].item().to<float>(), corner[2].item().to<float>()), res.data<float>(), num_rots.data<int>() ); })); return {d_xyz_labels, d_scale_labels, d_obj_labels}; }
376d55caab46a7fad3f3c3e55b02bf7bde538f56.cu
#include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include "helper_math.h" #include <thrust/device_vector.h> #include <vector> #include <iostream> template <typename scalar_t> __global__ void hv_cuda_forward_kernel( const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> points, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> xyz_labels, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> scale_labels, const torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits> obj_labels, torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> grid_obj, torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> grid_rot, torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> grid_scale, float3 corner, const float* __restrict__ res, const int* __restrict__ num_rots) { const int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < points.size(0)) { scalar_t objness = obj_labels[c]; float3 corr = make_float3( xyz_labels[c][0] * scale_labels[c][0], xyz_labels[c][1] * scale_labels[c][1], xyz_labels[c][2] * scale_labels[c][2] ); float3 point = make_float3(points[c][0], points[c][1], points[c][2]); const float rot_interval = 2 * 3.141592654f / (*num_rots); for (int i = 0; i < (*num_rots); i++) { float theta = i * rot_interval; float3 offset = make_float3(-cos(theta) * corr.x + sin(theta) * corr.z, -corr.y, -sin(theta) * corr.x - cos(theta) * corr.z); float3 center_grid = (point + offset - corner) / (*res); if (center_grid.x < 0 || center_grid.y < 0 || center_grid.z < 0 || center_grid.x >= grid_obj.size(0) - 1 || center_grid.y >= grid_obj.size(1) - 1 || center_grid.z >= grid_obj.size(2) - 1) { continue; } int3 center_grid_floor = make_int3(center_grid); int3 center_grid_ceil = center_grid_floor + 1; float3 residual = fracf(center_grid); float3 w0 = 1.f - residual; float3 w1 = residual; float lll = w0.x * w0.y * w0.z * objness; float llh = w0.x * w0.y * w1.z * objness; float lhl = w0.x * w1.y * w0.z * objness; float lhh = w0.x * w1.y * w1.z * objness; float hll = w1.x * w0.y * w0.z * objness; float hlh = w1.x * w0.y * w1.z * objness; float hhl = w1.x * w1.y * w0.z * objness; float hhh = w1.x * w1.y * w1.z * objness; atomicAdd(&grid_obj[center_grid_floor.x][center_grid_floor.y][center_grid_floor.z], lll); atomicAdd(&grid_obj[center_grid_floor.x][center_grid_floor.y][center_grid_ceil.z], llh); atomicAdd(&grid_obj[center_grid_floor.x][center_grid_ceil.y][center_grid_floor.z], lhl); atomicAdd(&grid_obj[center_grid_floor.x][center_grid_ceil.y][center_grid_ceil.z], lhh); atomicAdd(&grid_obj[center_grid_ceil.x][center_grid_floor.y][center_grid_floor.z], hll); atomicAdd(&grid_obj[center_grid_ceil.x][center_grid_floor.y][center_grid_ceil.z], hlh); atomicAdd(&grid_obj[center_grid_ceil.x][center_grid_ceil.y][center_grid_floor.z], hhl); atomicAdd(&grid_obj[center_grid_ceil.x][center_grid_ceil.y][center_grid_ceil.z], hhh); float rot_vec[2] = {cos(theta), sin(theta)}; for (int j = 0; j < 2; j++) { float rot = rot_vec[j]; atomicAdd(&grid_rot[center_grid_floor.x][center_grid_floor.y][center_grid_floor.z][j], lll * rot); atomicAdd(&grid_rot[center_grid_floor.x][center_grid_floor.y][center_grid_ceil.z][j], llh * rot); atomicAdd(&grid_rot[center_grid_floor.x][center_grid_ceil.y][center_grid_floor.z][j], lhl * rot); atomicAdd(&grid_rot[center_grid_floor.x][center_grid_ceil.y][center_grid_ceil.z][j], lhh * rot); 
atomicAdd(&grid_rot[center_grid_ceil.x][center_grid_floor.y][center_grid_floor.z][j], hll * rot); atomicAdd(&grid_rot[center_grid_ceil.x][center_grid_floor.y][center_grid_ceil.z][j], hlh * rot); atomicAdd(&grid_rot[center_grid_ceil.x][center_grid_ceil.y][center_grid_floor.z][j], hhl * rot); atomicAdd(&grid_rot[center_grid_ceil.x][center_grid_ceil.y][center_grid_ceil.z][j], hhh * rot); } for (int j = 0; j < 3; j++) { float scale = scale_labels[c][j]; atomicAdd(&grid_scale[center_grid_floor.x][center_grid_floor.y][center_grid_floor.z][j], lll * scale); atomicAdd(&grid_scale[center_grid_floor.x][center_grid_floor.y][center_grid_ceil.z][j], llh * scale); atomicAdd(&grid_scale[center_grid_floor.x][center_grid_ceil.y][center_grid_floor.z][j], lhl * scale); atomicAdd(&grid_scale[center_grid_floor.x][center_grid_ceil.y][center_grid_ceil.z][j], lhh * scale); atomicAdd(&grid_scale[center_grid_ceil.x][center_grid_floor.y][center_grid_floor.z][j], hll * scale); atomicAdd(&grid_scale[center_grid_ceil.x][center_grid_floor.y][center_grid_ceil.z][j], hlh * scale); atomicAdd(&grid_scale[center_grid_ceil.x][center_grid_ceil.y][center_grid_floor.z][j], hhl * scale); atomicAdd(&grid_scale[center_grid_ceil.x][center_grid_ceil.y][center_grid_ceil.z][j], hhh * scale); } } } } template <typename scalar_t> __global__ void hv_cuda_average_kernel( const torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> grid, torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> grid_rot, torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> grid_scale) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int z = blockIdx.z * blockDim.z + threadIdx.z; if (x >= grid.size(0) || y >= grid.size(1) || z >= grid.size(2)) return; float w = grid[x][y][z]; for (int j = 0; j < 2; j++) { grid_rot[x][y][z][j] /= w + 1e-7; } for (int j = 0; j < 3; j++) { grid_scale[x][y][z][j] /= w + 1e-7; } } std::vector<torch::Tensor> hv_cuda_forward( torch::Tensor points, torch::Tensor xyz_labels, torch::Tensor scale_labels, torch::Tensor obj_labels, torch::Tensor res, torch::Tensor num_rots) { auto corners = torch::stack({std::get<0>(torch::min(points, 0)), std::get<0>(torch::max(points, 0))}, 0); // 2 x 3 auto corner = corners[0]; // 3 auto diff = (corners[1] - corners[0]) / res; // 3 auto grid_obj = torch::zeros({diff[0].item().to<int>() + 1, diff[1].item().to<int>() + 1, diff[2].item().to<int>() + 1}, points.options()); auto grid_rot = torch::zeros({diff[0].item().to<int>() + 1, diff[1].item().to<int>() + 1, diff[2].item().to<int>() + 1, 2}, points.options()); auto grid_scale = torch::zeros({diff[0].item().to<int>() + 1, diff[1].item().to<int>() + 1, diff[2].item().to<int>() + 1, 3}, points.options()); // std::cout << grid.size(0) << ", " << grid.size(1) << ", " << grid.size(2) << std::endl; // std::cout << corner << std::endl; const int threads = 1024; const dim3 blocks((points.size(0) + threads - 1) / threads); AT_DISPATCH_FLOATING_TYPES(points.type(), "hv_forward_cuda", ([&] { hv_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( points.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), xyz_labels.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), scale_labels.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), obj_labels.packed_accessor32<scalar_t, 1, torch::RestrictPtrTraits>(), grid_obj.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(), grid_rot.packed_accessor32<scalar_t, 4, torch::RestrictPtrTraits>(), 
grid_scale.packed_accessor32<scalar_t, 4, torch::RestrictPtrTraits>(), make_float3(corner[0].item().to<float>(), corner[1].item().to<float>(), corner[2].item().to<float>()), res.data<float>(), num_rots.data<int>() ); })); AT_DISPATCH_FLOATING_TYPES(points.type(), "hv_average_cuda", ([&] { hv_cuda_average_kernel<scalar_t><<<dim3((grid_obj.size(0) + 7) / 8, (grid_obj.size(1) + 7) / 8, (grid_obj.size(2) + 7) / 8), dim3(8, 8, 8)>>>( grid_obj.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(), grid_rot.packed_accessor32<scalar_t, 4, torch::RestrictPtrTraits>(), grid_scale.packed_accessor32<scalar_t, 4, torch::RestrictPtrTraits>() ); })); return {grid_obj, grid_rot, grid_scale}; } template <typename scalar_t> __global__ void hv_cuda_backward_kernel( const torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> grad_grid, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> points, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> xyz_labels, const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> scale_labels, const torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits> obj_labels, // torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> d_points, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> d_xyz_labels, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> d_scale_labels, torch::PackedTensorAccessor32<scalar_t, 1, torch::RestrictPtrTraits> d_obj_labels, float3 corner, const float* __restrict__ res, const int* __restrict__ num_rots) { const int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < points.size(0)) { scalar_t objness = obj_labels[c]; float3 corr = make_float3( xyz_labels[c][0] * scale_labels[c][0], xyz_labels[c][1] * scale_labels[c][1], xyz_labels[c][2] * scale_labels[c][2] ); float3 point = make_float3(points[c][0], points[c][1], points[c][2]); float rot_interval = 2 * 3.141592654f / (*num_rots); for (int i = 0; i < (*num_rots); i++) { float theta = i * rot_interval; float3 offset = make_float3(-cos(theta) * corr.x + sin(theta) * corr.z, -corr.y, -sin(theta) * corr.x - cos(theta) * corr.z); float3 center_grid = (point + offset - corner) / (*res); if (center_grid.x < 0 || center_grid.y < 0 || center_grid.z < 0 || center_grid.x >= grad_grid.size(0) - 1 || center_grid.y >= grad_grid.size(1) - 1 || center_grid.z >= grad_grid.size(2) - 1) { continue; } int3 center_grid_floor = make_int3(center_grid); int3 center_grid_ceil = center_grid_floor + 1; float3 residual = fracf(center_grid); float3 w0 = 1.f - residual; float3 w1 = residual; d_obj_labels[c] += grad_grid[center_grid_floor.x][center_grid_floor.y][center_grid_floor.z] * w0.x * w0.y * w0.z; d_obj_labels[c] += grad_grid[center_grid_floor.x][center_grid_floor.y][center_grid_ceil.z] * w0.x * w0.y * w1.z; d_obj_labels[c] += grad_grid[center_grid_floor.x][center_grid_ceil.y][center_grid_floor.z] * w0.x * w1.y * w0.z; d_obj_labels[c] += grad_grid[center_grid_floor.x][center_grid_ceil.y][center_grid_ceil.z] * w0.x * w1.y * w1.z; d_obj_labels[c] += grad_grid[center_grid_ceil.x][center_grid_floor.y][center_grid_floor.z] * w1.x * w0.y * w0.z; d_obj_labels[c] += grad_grid[center_grid_ceil.x][center_grid_floor.y][center_grid_ceil.z] * w1.x * w0.y * w1.z; d_obj_labels[c] += grad_grid[center_grid_ceil.x][center_grid_ceil.y][center_grid_floor.z] * w1.x * w1.y * w0.z; d_obj_labels[c] += grad_grid[center_grid_ceil.x][center_grid_ceil.y][center_grid_ceil.z] * w1.x * w1.y * w1.z; float3 
dgrid_dcenter = make_float3( - grad_grid[center_grid_floor.x][center_grid_floor.y][center_grid_floor.z] * w0.y * w0.z - grad_grid[center_grid_floor.x][center_grid_floor.y][center_grid_ceil.z] * w0.y * w1.z - grad_grid[center_grid_floor.x][center_grid_ceil.y][center_grid_floor.z] * w1.y * w0.z - grad_grid[center_grid_floor.x][center_grid_ceil.y][center_grid_ceil.z] * w1.y * w1.z + grad_grid[center_grid_ceil.x][center_grid_floor.y][center_grid_floor.z] * w0.y * w0.z + grad_grid[center_grid_ceil.x][center_grid_floor.y][center_grid_ceil.z] * w0.y * w1.z + grad_grid[center_grid_ceil.x][center_grid_ceil.y][center_grid_floor.z] * w1.y * w0.z + grad_grid[center_grid_ceil.x][center_grid_ceil.y][center_grid_ceil.z] * w1.y * w1.z, - grad_grid[center_grid_floor.x][center_grid_floor.y][center_grid_floor.z] * w0.x * w0.z - grad_grid[center_grid_floor.x][center_grid_floor.y][center_grid_ceil.z] * w0.x * w1.z + grad_grid[center_grid_floor.x][center_grid_ceil.y][center_grid_floor.z] * w0.x * w0.z + grad_grid[center_grid_floor.x][center_grid_ceil.y][center_grid_ceil.z] * w0.x * w1.z - grad_grid[center_grid_ceil.x][center_grid_floor.y][center_grid_floor.z] * w1.x * w0.z - grad_grid[center_grid_ceil.x][center_grid_floor.y][center_grid_ceil.z] * w1.x * w1.z + grad_grid[center_grid_ceil.x][center_grid_ceil.y][center_grid_floor.z] * w1.x * w0.z + grad_grid[center_grid_ceil.x][center_grid_ceil.y][center_grid_ceil.z] * w1.x * w1.z, - grad_grid[center_grid_floor.x][center_grid_floor.y][center_grid_floor.z] * w0.x * w0.y + grad_grid[center_grid_floor.x][center_grid_floor.y][center_grid_ceil.z] * w0.x * w0.y - grad_grid[center_grid_floor.x][center_grid_ceil.y][center_grid_floor.z] * w0.x * w1.y + grad_grid[center_grid_floor.x][center_grid_ceil.y][center_grid_ceil.z] * w0.x * w1.y - grad_grid[center_grid_ceil.x][center_grid_floor.y][center_grid_floor.z] * w1.x * w0.y + grad_grid[center_grid_ceil.x][center_grid_floor.y][center_grid_ceil.z] * w1.x * w0.y - grad_grid[center_grid_ceil.x][center_grid_ceil.y][center_grid_floor.z] * w1.x * w1.y + grad_grid[center_grid_ceil.x][center_grid_ceil.y][center_grid_ceil.z] * w1.x * w1.y) * objness; // d_points[c][0] += dgrid_dcenter.x; // d_points[c][1] += dgrid_dcenter.y; // d_points[c][2] += dgrid_dcenter.z; float3 d_corr = make_float3(- cos(theta) * dgrid_dcenter.x - sin(theta) * dgrid_dcenter.z, -dgrid_dcenter.y, sin(theta) * dgrid_dcenter.x - cos(theta) * dgrid_dcenter.z); d_xyz_labels[c][0] += d_corr.x * scale_labels[c][0]; d_xyz_labels[c][1] += d_corr.y * scale_labels[c][1]; d_xyz_labels[c][2] += d_corr.z * scale_labels[c][2]; d_scale_labels[c][0] += d_corr.x * xyz_labels[c][0]; d_scale_labels[c][1] += d_corr.y * xyz_labels[c][1]; d_scale_labels[c][2] += d_corr.z * xyz_labels[c][2]; } } } std::vector<torch::Tensor> hv_cuda_backward( torch::Tensor grad_grid, torch::Tensor points, torch::Tensor xyz_labels, torch::Tensor scale_labels, torch::Tensor obj_labels, torch::Tensor res, torch::Tensor num_rots) { auto corners = torch::stack({std::get<0>(torch::min(points, 0)), std::get<0>(torch::max(points, 0))}, 0); // 2 x 3 auto corner = corners[0]; // 3 auto diff = (corners[1] - corners[0]) / res; // 3 // auto d_points = torch::zeros_like(points); auto d_xyz_labels = torch::zeros_like(xyz_labels); auto d_scale_labels = torch::zeros_like(scale_labels); auto d_obj_labels = torch::zeros_like(obj_labels); const int threads = 512; const dim3 blocks((points.size(0) + threads - 1) / threads); AT_DISPATCH_FLOATING_TYPES(points.type(), "hv_backward_cuda", ([&] { 
hv_cuda_backward_kernel<scalar_t><<<blocks, threads>>>( grad_grid.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(), points.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), xyz_labels.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), scale_labels.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), obj_labels.packed_accessor32<scalar_t, 1, torch::RestrictPtrTraits>(), // d_points.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), d_xyz_labels.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), d_scale_labels.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), d_obj_labels.packed_accessor32<scalar_t, 1, torch::RestrictPtrTraits>(), make_float3(corner[0].item().to<float>(), corner[1].item().to<float>(), corner[2].item().to<float>()), res.data<float>(), num_rots.data<int>() ); })); return {d_xyz_labels, d_scale_labels, d_obj_labels}; }
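The forward and backward kernels above spread each rotated vote over the eight grid cells surrounding its continuous position with trilinear weights, and hv_cuda_average_kernel later divides the accumulated rotation and scale by the accumulated objectness. The device helper below isolates that trilinear splat on a plain dense float grid with flat row-major indexing, as a readable sketch of the weighting scheme; the helper name and the flat layout are assumptions, since the real kernels write through PackedTensorAccessor32 views.

#include <cuda_runtime.h>
#include "helper_math.h"   // float3/int3 helpers used by the kernels above

// Spread a vote of weight w at continuous grid position p over the eight
// neighbouring cells with weights (1-|dx|)(1-|dy|)(1-|dz|), skipping votes
// that fall outside the grid exactly as the kernels above do.
__device__ void splat_trilinear(float *grid, int X, int Y, int Z, float3 p, float w)
{
    if (p.x < 0 || p.y < 0 || p.z < 0 ||
        p.x >= X - 1 || p.y >= Y - 1 || p.z >= Z - 1)
        return;

    const int3 lo = make_int3(p);                    // floor corner
    const float3 w1 = fracf(p);                      // fractional part
    const float3 w0 = make_float3(1.0f) - w1;

    for (int dx = 0; dx < 2; ++dx)
        for (int dy = 0; dy < 2; ++dy)
            for (int dz = 0; dz < 2; ++dz) {
                const float ww = (dx ? w1.x : w0.x) *
                                 (dy ? w1.y : w0.y) *
                                 (dz ? w1.z : w0.z) * w;
                const int idx = ((lo.x + dx) * Y + (lo.y + dy)) * Z + (lo.z + dz);
                atomicAdd(&grid[idx], ww);           // many points vote concurrently
            }
}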
47481449e2c337a2d2ec39baf98401ef7a1022c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "equalizer.cuh" #include "error_checker.cuh" extern "C" { #include <stdio.h> #include "cexception/lib/CException.h" #include "log.h" #include "errors.h" #include "arguments.h" #include "defines.h" } #define BLOCK_SIZE (512) __global__ void compute_histogram(const float *image, unsigned int *bins, unsigned int num_elements) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ unsigned int bins_s[]; for (unsigned int binIdx = threadIdx.x; binIdx < N_BINS; binIdx += blockDim.x) { bins_s[binIdx] = 0; } __syncthreads(); for (unsigned int i = tid; i < num_elements; i += blockDim.x * gridDim.x) { atomicAdd(&(bins_s[(unsigned int)__float2int_rn(image[i] * (N_BINS - 1))]), 1); } __syncthreads(); for (unsigned int binIdx = threadIdx.x; binIdx < N_BINS; binIdx += blockDim.x) { atomicAdd(&(bins[binIdx]), bins_s[binIdx]); } } __global__ void convert_rgb_to_hsl(const rgb_pixel_t *rgb_image, hsl_image_t hsl_image, unsigned int num_elements) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < num_elements) { const rgb_pixel_t rgb_pixel = *(rgb_pixel_t *)(&rgb_image[tid]); hsl_pixel_t hsl_pixel = { .h = 0, .s = 0, .l = 0 }; rgb_to_hsl(rgb_pixel, &hsl_pixel); hsl_image.h[tid] = hsl_pixel.h; hsl_image.s[tid] = hsl_pixel.s; hsl_image.l[tid] = hsl_pixel.l; } } __global__ void convert_hsl_to_rgb(const hsl_image_t hsl_image, rgb_pixel_t *rgb_image, unsigned int num_elements) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < num_elements) { rgb_pixel_t *pixel_offset = &rgb_image[tid]; rgb_pixel_t rgb_pixel = { .r = 0, .g = 0, .b = 0, .a = 0xFF }; hsl_pixel_t hsl_pixel = { .h = hsl_image.h[tid], .s = hsl_image.s[tid], .l = hsl_image.l[tid] }; hsl_to_rgb(hsl_pixel, &rgb_pixel); pixel_offset->r = rgb_pixel.r; pixel_offset->g = rgb_pixel.g; pixel_offset->b = rgb_pixel.b; pixel_offset->a = rgb_pixel.a; } } __global__ void compute_cdf(unsigned int *input, unsigned int *output, int input_size) { __shared__ unsigned int sh_out[BLOCK_SIZE]; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < input_size) { sh_out[threadIdx.x] = input[tid]; } for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); if(threadIdx.x >= stride) { sh_out[threadIdx.x] += sh_out[threadIdx.x - stride]; } } __syncthreads(); if (tid < input_size) { output[tid] = sh_out[threadIdx.x]; } } __global__ void compute_normalized_cdf(unsigned int *cdf, float *cdf_norm, int cdf_size, int norm_factor) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < cdf_size) { cdf_norm[tid] = ((float)(cdf[tid] - cdf[0]) / (norm_factor - cdf[0])) * (cdf_size - 1); } } __global__ void apply_normalized_cdf(const float *cdf_norm, const hsl_image_t hsl_image, int cdf_size, int image_size) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < image_size) { hsl_image.l[tid] = cdf_norm[(unsigned int)__float2int_rn(hsl_image.l[tid] * (cdf_size - 1))] / (cdf_size - 1); } } int equalize(rgb_pixel_t *input, unsigned int width, unsigned int height, uint8_t **output) { CEXCEPTION_T e = NO_ERROR; int blocksPerGrid = 0; rgb_pixel_t *d_rgb_image = NULL; rgb_pixel_t *d_output_image = NULL; unsigned int *d_histogram = NULL; unsigned int *d_cdf = NULL; float *d_cdf_norm = NULL; hsl_image_t d_hsl_image = { .h = NULL, .s = NULL, .l = NULL }; Try { // Allocate memory for the image on the device gpuErrorCheck( hipMalloc((void**)&d_rgb_image, width * height * sizeof(rgb_pixel_t)) 
); gpuErrorCheck( hipMemcpy(d_rgb_image, input, width * height * sizeof(rgb_pixel_t), hipMemcpyHostToDevice) ); gpuErrorCheck( hipMalloc((void**)&(d_hsl_image.h), width * height * sizeof(int)) ); gpuErrorCheck( hipMalloc((void**)&(d_hsl_image.s), width * height * sizeof(float)) ); gpuErrorCheck( hipMalloc((void**)&(d_hsl_image.l), width * height * sizeof(float)) ); // Allocate memory for the output *output = (uint8_t *)calloc(width * height, sizeof(rgb_pixel_t)); check_pointer(*output); gpuErrorCheck( hipMalloc((void**)&d_output_image, width * height * sizeof(rgb_pixel_t)) ); gpuErrorCheck( hipMalloc((void**)&d_histogram, N_BINS * sizeof(unsigned int)) ); gpuErrorCheck( hipMalloc((void**)&d_cdf, N_BINS * sizeof(unsigned int)) ); gpuErrorCheck( hipMalloc((void**)&d_cdf_norm, N_BINS * sizeof(float)) ); // ************************************** // STEP 1 - convert every pixel from RGB to HSL blocksPerGrid = ((width * height) + BLOCK_SIZE - 1) / BLOCK_SIZE; hipLaunchKernelGGL(( convert_rgb_to_hsl), dim3(blocksPerGrid), dim3(BLOCK_SIZE), 0, 0, d_rgb_image, d_hsl_image, (width * height)); // ************************************** // STEP 2 - compute the histogram of the luminance for each pixel blocksPerGrid = 30; hipLaunchKernelGGL(( compute_histogram), dim3(blocksPerGrid), dim3(BLOCK_SIZE), N_BINS * sizeof(unsigned int), 0, d_hsl_image.l, d_histogram, (width * height)); // ************************************** // STEP 3 - compute the cumulative distribution function by applying the parallelized // version of the scan algorithm blocksPerGrid = (N_BINS + BLOCK_SIZE - 1) / BLOCK_SIZE; hipLaunchKernelGGL(( compute_cdf), dim3(blocksPerGrid), dim3(BLOCK_SIZE), 0, 0, d_histogram, d_cdf, N_BINS); // ************************************** // STEP 4 - compute the normalized cumulative distribution function blocksPerGrid = (N_BINS + BLOCK_SIZE - 1) / BLOCK_SIZE; hipLaunchKernelGGL(( compute_normalized_cdf), dim3(blocksPerGrid), dim3(BLOCK_SIZE), 0, 0, d_cdf, d_cdf_norm, N_BINS, (width * height)); // ************************************** // STEP 5 - apply the normalized CDF to the luminance for each pixel blocksPerGrid = ((width * height) + BLOCK_SIZE - 1) / BLOCK_SIZE; hipLaunchKernelGGL(( apply_normalized_cdf), dim3(blocksPerGrid), dim3(BLOCK_SIZE), 0, 0, d_cdf_norm, d_hsl_image, N_BINS, (width * height)); // ************************************** // STEP 6 - convert each HSL pixel back to RGB blocksPerGrid = ((width * height) + BLOCK_SIZE - 1) / BLOCK_SIZE; hipLaunchKernelGGL(( convert_hsl_to_rgb), dim3(blocksPerGrid), dim3(BLOCK_SIZE), 0, 0, d_hsl_image, d_output_image, width * height); // Copy the result back from the device gpuErrorCheck( hipMemcpy(*output, d_output_image, width * height * sizeof(rgb_pixel_t), hipMemcpyDeviceToHost) ); if(arguments.log_histogram) { unsigned int *h_histogram = NULL; unsigned int *h_cdf = NULL; float *h_cdf_norm = NULL; h_histogram = (unsigned int *)calloc(N_BINS, sizeof(unsigned int)); h_cdf = (unsigned int *)calloc(N_BINS, sizeof(unsigned int)); h_cdf_norm = (float *)calloc(N_BINS, sizeof(float)); check_pointer(h_histogram); check_pointer(h_cdf); check_pointer(h_cdf_norm); gpuErrorCheck( hipMemcpy(h_histogram, d_histogram, N_BINS * sizeof(unsigned int), hipMemcpyDeviceToHost) ); gpuErrorCheck( hipMemcpy(h_cdf, d_cdf, N_BINS * sizeof(unsigned int), hipMemcpyDeviceToHost) ); gpuErrorCheck( hipMemcpy(h_cdf_norm, d_cdf_norm, N_BINS * sizeof(float), hipMemcpyDeviceToHost) ); log_info("Printing histogram.."); for(int bin = 0; bin < N_BINS; bin++) { log_info("%d:%d", 
bin, h_histogram[bin]); } log_info("Printing cdf.."); for(int bin = 0; bin < N_BINS; bin++) { log_info("%d:%d", bin, h_cdf[bin]); } log_info("Printing normalized cdf.."); for(int bin = 0; bin < N_BINS; bin++) { log_info("%d:%g", bin, h_cdf_norm[bin]); } free(h_histogram); free(h_cdf); free(h_cdf_norm); } } Catch(e) { log_error("Caught exception %d while equalizing image!", e); } hipFree(d_rgb_image); hipFree(d_output_image); hipFree(d_histogram); hipFree(d_cdf); hipFree(d_cdf_norm); hipFree(d_hsl_image.h); hipFree(d_hsl_image.s); hipFree(d_hsl_image.l); return e; }
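// A minimal host-side reference sketch of the histogram -> CDF -> normalization math
// implemented by compute_histogram, compute_cdf, compute_normalized_cdf and
// apply_normalized_cdf above, useful for checking the device results on small inputs.
// It assumes N_BINS from the project headers and luminance values in [0, 1]; rounding
// uses "+ 0.5f" where the kernels use __float2int_rn.
static void equalize_luminance_reference(float *l, unsigned int num_pixels)
{
    unsigned int hist[N_BINS] = { 0 };   // the accumulation buffer starts at zero
    unsigned int cdf[N_BINS];
    float cdf_norm[N_BINS];

    // Histogram of the quantized luminance (same binning as compute_histogram).
    for (unsigned int i = 0; i < num_pixels; i++) {
        hist[(unsigned int)(l[i] * (N_BINS - 1) + 0.5f)]++;
    }

    // Inclusive prefix sum - the sequential equivalent of the scan in compute_cdf.
    cdf[0] = hist[0];
    for (unsigned int b = 1; b < N_BINS; b++) {
        cdf[b] = cdf[b - 1] + hist[b];
    }

    // Normalized CDF, same formula as compute_normalized_cdf with
    // norm_factor = width * height = num_pixels.
    for (unsigned int b = 0; b < N_BINS; b++) {
        cdf_norm[b] = ((float)(cdf[b] - cdf[0]) / (num_pixels - cdf[0])) * (N_BINS - 1);
    }

    // Remap the luminance, same lookup as apply_normalized_cdf.
    for (unsigned int i = 0; i < num_pixels; i++) {
        unsigned int bin = (unsigned int)(l[i] * (N_BINS - 1) + 0.5f);
        l[i] = cdf_norm[bin] / (N_BINS - 1);
    }
}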
47481449e2c337a2d2ec39baf98401ef7a1022c7.cu
#include "equalizer.cuh" #include "error_checker.cuh" extern "C" { #include <stdio.h> #include "cexception/lib/CException.h" #include "log.h" #include "errors.h" #include "arguments.h" #include "defines.h" } #define BLOCK_SIZE (512) __global__ void compute_histogram(const float *image, unsigned int *bins, unsigned int num_elements) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ unsigned int bins_s[]; for (unsigned int binIdx = threadIdx.x; binIdx < N_BINS; binIdx += blockDim.x) { bins_s[binIdx] = 0; } __syncthreads(); for (unsigned int i = tid; i < num_elements; i += blockDim.x * gridDim.x) { atomicAdd(&(bins_s[(unsigned int)__float2int_rn(image[i] * (N_BINS - 1))]), 1); } __syncthreads(); for (unsigned int binIdx = threadIdx.x; binIdx < N_BINS; binIdx += blockDim.x) { atomicAdd(&(bins[binIdx]), bins_s[binIdx]); } } __global__ void convert_rgb_to_hsl(const rgb_pixel_t *rgb_image, hsl_image_t hsl_image, unsigned int num_elements) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < num_elements) { const rgb_pixel_t rgb_pixel = *(rgb_pixel_t *)(&rgb_image[tid]); hsl_pixel_t hsl_pixel = { .h = 0, .s = 0, .l = 0 }; rgb_to_hsl(rgb_pixel, &hsl_pixel); hsl_image.h[tid] = hsl_pixel.h; hsl_image.s[tid] = hsl_pixel.s; hsl_image.l[tid] = hsl_pixel.l; } } __global__ void convert_hsl_to_rgb(const hsl_image_t hsl_image, rgb_pixel_t *rgb_image, unsigned int num_elements) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < num_elements) { rgb_pixel_t *pixel_offset = &rgb_image[tid]; rgb_pixel_t rgb_pixel = { .r = 0, .g = 0, .b = 0, .a = 0xFF }; hsl_pixel_t hsl_pixel = { .h = hsl_image.h[tid], .s = hsl_image.s[tid], .l = hsl_image.l[tid] }; hsl_to_rgb(hsl_pixel, &rgb_pixel); pixel_offset->r = rgb_pixel.r; pixel_offset->g = rgb_pixel.g; pixel_offset->b = rgb_pixel.b; pixel_offset->a = rgb_pixel.a; } } __global__ void compute_cdf(unsigned int *input, unsigned int *output, int input_size) { __shared__ unsigned int sh_out[BLOCK_SIZE]; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < input_size) { sh_out[threadIdx.x] = input[tid]; } for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); if(threadIdx.x >= stride) { sh_out[threadIdx.x] += sh_out[threadIdx.x - stride]; } } __syncthreads(); if (tid < input_size) { output[tid] = sh_out[threadIdx.x]; } } __global__ void compute_normalized_cdf(unsigned int *cdf, float *cdf_norm, int cdf_size, int norm_factor) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < cdf_size) { cdf_norm[tid] = ((float)(cdf[tid] - cdf[0]) / (norm_factor - cdf[0])) * (cdf_size - 1); } } __global__ void apply_normalized_cdf(const float *cdf_norm, const hsl_image_t hsl_image, int cdf_size, int image_size) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < image_size) { hsl_image.l[tid] = cdf_norm[(unsigned int)__float2int_rn(hsl_image.l[tid] * (cdf_size - 1))] / (cdf_size - 1); } } int equalize(rgb_pixel_t *input, unsigned int width, unsigned int height, uint8_t **output) { CEXCEPTION_T e = NO_ERROR; int blocksPerGrid = 0; rgb_pixel_t *d_rgb_image = NULL; rgb_pixel_t *d_output_image = NULL; unsigned int *d_histogram = NULL; unsigned int *d_cdf = NULL; float *d_cdf_norm = NULL; hsl_image_t d_hsl_image = { .h = NULL, .s = NULL, .l = NULL }; Try { // Allocate memory for the image on the device gpuErrorCheck( cudaMalloc((void**)&d_rgb_image, width * height * sizeof(rgb_pixel_t)) ); gpuErrorCheck( cudaMemcpy(d_rgb_image, input, width * height * sizeof(rgb_pixel_t), 
cudaMemcpyHostToDevice) ); gpuErrorCheck( cudaMalloc((void**)&(d_hsl_image.h), width * height * sizeof(int)) ); gpuErrorCheck( cudaMalloc((void**)&(d_hsl_image.s), width * height * sizeof(float)) ); gpuErrorCheck( cudaMalloc((void**)&(d_hsl_image.l), width * height * sizeof(float)) ); // Allocate memory for the output *output = (uint8_t *)calloc(width * height, sizeof(rgb_pixel_t)); check_pointer(*output); gpuErrorCheck( cudaMalloc((void**)&d_output_image, width * height * sizeof(rgb_pixel_t)) ); gpuErrorCheck( cudaMalloc((void**)&d_histogram, N_BINS * sizeof(unsigned int)) ); gpuErrorCheck( cudaMalloc((void**)&d_cdf, N_BINS * sizeof(unsigned int)) ); gpuErrorCheck( cudaMalloc((void**)&d_cdf_norm, N_BINS * sizeof(float)) ); // ************************************** // STEP 1 - convert every pixel from RGB to HSL blocksPerGrid = ((width * height) + BLOCK_SIZE - 1) / BLOCK_SIZE; convert_rgb_to_hsl<<<blocksPerGrid, BLOCK_SIZE>>>(d_rgb_image, d_hsl_image, (width * height)); // ************************************** // STEP 2 - compute the histogram of the luminance for each pixel blocksPerGrid = 30; compute_histogram<<<blocksPerGrid, BLOCK_SIZE, N_BINS * sizeof(unsigned int)>>>(d_hsl_image.l, d_histogram, (width * height)); // ************************************** // STEP 3 - compute the cumulative distribution function by applying the parallelized // version of the scan algorithm blocksPerGrid = (N_BINS + BLOCK_SIZE - 1) / BLOCK_SIZE; compute_cdf<<<blocksPerGrid, BLOCK_SIZE>>>(d_histogram, d_cdf, N_BINS); // ************************************** // STEP 4 - compute the normalized cumulative distribution function blocksPerGrid = (N_BINS + BLOCK_SIZE - 1) / BLOCK_SIZE; compute_normalized_cdf<<<blocksPerGrid, BLOCK_SIZE>>>(d_cdf, d_cdf_norm, N_BINS, (width * height)); // ************************************** // STEP 5 - apply the normalized CDF to the luminance for each pixel blocksPerGrid = ((width * height) + BLOCK_SIZE - 1) / BLOCK_SIZE; apply_normalized_cdf<<<blocksPerGrid, BLOCK_SIZE>>>(d_cdf_norm, d_hsl_image, N_BINS, (width * height)); // ************************************** // STEP 6 - convert each HSL pixel back to RGB blocksPerGrid = ((width * height) + BLOCK_SIZE - 1) / BLOCK_SIZE; convert_hsl_to_rgb<<<blocksPerGrid, BLOCK_SIZE>>>(d_hsl_image, d_output_image, width * height); // Copy the result back from the device gpuErrorCheck( cudaMemcpy(*output, d_output_image, width * height * sizeof(rgb_pixel_t), cudaMemcpyDeviceToHost) ); if(arguments.log_histogram) { unsigned int *h_histogram = NULL; unsigned int *h_cdf = NULL; float *h_cdf_norm = NULL; h_histogram = (unsigned int *)calloc(N_BINS, sizeof(unsigned int)); h_cdf = (unsigned int *)calloc(N_BINS, sizeof(unsigned int)); h_cdf_norm = (float *)calloc(N_BINS, sizeof(float)); check_pointer(h_histogram); check_pointer(h_cdf); check_pointer(h_cdf_norm); gpuErrorCheck( cudaMemcpy(h_histogram, d_histogram, N_BINS * sizeof(unsigned int), cudaMemcpyDeviceToHost) ); gpuErrorCheck( cudaMemcpy(h_cdf, d_cdf, N_BINS * sizeof(unsigned int), cudaMemcpyDeviceToHost) ); gpuErrorCheck( cudaMemcpy(h_cdf_norm, d_cdf_norm, N_BINS * sizeof(float), cudaMemcpyDeviceToHost) ); log_info("Printing histogram.."); for(int bin = 0; bin < N_BINS; bin++) { log_info("%d:%d", bin, h_histogram[bin]); } log_info("Printing cdf.."); for(int bin = 0; bin < N_BINS; bin++) { log_info("%d:%d", bin, h_cdf[bin]); } log_info("Printing normalized cdf.."); for(int bin = 0; bin < N_BINS; bin++) { log_info("%d:%g", bin, h_cdf_norm[bin]); } free(h_histogram); free(h_cdf); 
free(h_cdf_norm); } } Catch(e) { log_error("Caught exception %d while equalizing image!", e); } cudaFree(d_rgb_image); cudaFree(d_output_image); cudaFree(d_histogram); cudaFree(d_cdf); cudaFree(d_cdf_norm); cudaFree(d_hsl_image.h); cudaFree(d_hsl_image.s); cudaFree(d_hsl_image.l); return e; }
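// A minimal usage sketch for equalize() above. It relies only on what the file shows:
// the rgb_pixel_t fields r/g/b/a, the equalize() signature, and the fact that *output
// is allocated as width * height rgb_pixel_t entries. The 2x2 solid-grey input is
// purely illustrative; real callers would load an image.
static int equalize_demo(void)
{
    const unsigned int width = 2, height = 2;
    rgb_pixel_t *input = (rgb_pixel_t *)calloc(width * height, sizeof(rgb_pixel_t));
    uint8_t *output = NULL;

    if (input == NULL) {
        return -1;
    }

    for (unsigned int i = 0; i < width * height; i++) {
        input[i].r = 100;
        input[i].g = 100;
        input[i].b = 100;
        input[i].a = 0xFF;
    }

    // Returns the CException code accumulated in equalize() (NO_ERROR on success);
    // on success *output holds the equalized pixels.
    int err = equalize(input, width, height, &output);

    free(output);
    free(input);
    return err;
}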
b4d9a4bbbeb222731eada0af812092bb2cc3c047.hip
// !!! This is a file automatically generated by hipify!!! /* * multigpu.cu * * Created on: 01/dic/2014 * Author: Edoardo Mondoni */ #include "samples.h" #include "sim_parameters.h" #include "adm_matrix.h" #include "err.h" #include "multigpu.h" #include "init_osc.h" #include "init_output.h" #include "simulate.h" #include "output_file.h" #include "rng.h" #include <hiprand/hiprand_kernel.h> #include <stdio.h> #include <stdarg.h> #include <stdlib.h> /*** GLOBAL VARIABLE DEFINITIONS ***/ unsigned int h_n_osc; unsigned int h_n_per; unsigned int h_noisy; /*** CONSTANT MEMORY DEFINITIONS ***/ __constant__ unsigned int d_n_osc; __constant__ unsigned int d_n_per; int main(int argc, char** argv) { double *d_matrix, *d_periods, *d_time, *d_alpha, *d_theta; hiprandState_t *d_states; /* PRELIMINARY PHASE * The arguments are validated and stored in the appropriate variables, including the samples * of the Gamma(t) and Vo(t) functions along with the time grid used for their sampling. * The program initializes the oscillators' properties and the simulation based on the constant * parameters #defined in macros and those derivable from the Gamma(t) and Vo(t) functions. * Then, the transadmittance matrix (d_matrix) is randomly generated and stored in device memory * for later use. */ process_arguments(argc, argv); initialize_rng(h_n_osc, 17021991, &d_states); allocate_samples(0); // copies Gamma(t) and Vo(t) samples (and instants) into dev initialize_simulation(&d_periods, &d_time, d_states); generate_matrix(&d_matrix, d_states); /* OUTPUT INITIALIZATION PHASE * The d_alpha and d_theta pointers are initialized (i.e. memory is allocated which will contain * the results of the simulation). Furthermore, the alpha_k(0) are stored in the appropriate * memory locations (they can be generated here since they are randomized). */ initialize_output(&d_alpha, &d_theta, d_periods, d_states); /* SIMULATION PHASE * This is the core of the program: it is where the simulation happens. */ perform_simulation(d_alpha, d_theta, d_time, d_matrix, d_periods, h_noisy, d_states); /* OUTPUT COPY PHASE * The results of the simulation are copied back to host memory and then to a file. 
*/ double *h_alpha, *h_theta, *h_time; h_alpha = (double *) malloc(sizeof(double) * h_n_osc * h_n_steps); h_theta = (double *) malloc(sizeof(double) * h_n_osc * h_n_steps); h_time = (double *) malloc(sizeof(double) * h_n_steps); hipMemcpy(h_alpha, d_alpha, sizeof(double) * h_n_osc * h_n_steps, hipMemcpyDeviceToHost); hipMemcpy(h_theta, d_theta, sizeof(double) * h_n_osc * h_n_steps, hipMemcpyDeviceToHost); hipMemcpy(h_time, d_time, sizeof(double) * h_n_steps, hipMemcpyDeviceToHost); matrix_to_file(h_alpha, ALPHA_FILE, h_n_steps, h_n_osc); matrix_to_file(h_theta, THETA_FILE, h_n_steps, h_n_osc); matrix_to_file(h_time, TIME_FILE, 1, h_n_steps); /* RESOURCE FREEING PHASE * */ hipFree(d_states); free_resources(8, DEVICE_RES, d_matrix, DEVICE_RES, d_periods, DEVICE_RES, d_time, DEVICE_RES, d_alpha, DEVICE_RES, d_theta, HOST_RES, h_alpha, HOST_RES, h_theta, HOST_RES, h_time); } void process_arguments(int argc, char** argv) { if (argc < 4) { fprintf(stderr, "Usage: multigpu <n_oscillators> <n_periods> <noisy_flag>\n"); exit(NOT_ENOUGH_PARAMS); } int ret1 = sscanf(argv[1], "%u", &h_n_osc); //store the number of oscillators into the host variable int ret2 = sscanf(argv[2], "%u", &h_n_per); //store the number of periods into the host variable int ret3 = sscanf(argv[3], "%u", &h_noisy); //store the noisy flag into the host variable if (ret1 != 1 || ret2 != 1 || ret3 != 1) { fprintf(stderr, "An error has occurred while parsing the parameters. Please check their consistency.\n"); exit(WRONG_PARAMS); } else if (h_n_osc < 1) { fprintf(stderr, "Invalid number of oscillators. Valid values: >= 1\n"); exit(INVALID_N_OSC); } else if (h_n_per < 1) { fprintf(stderr, "Invalid number of periods. Valid values: >= 1\n"); exit(INVALID_N_PER); } else if (!(h_noisy == 0 || h_noisy == 1)) { fprintf(stderr, "Invalid noisy flag. Valid values: 0 (no noise), 1 (noise)\n"); exit(INVALID_NOISY_FLAG); } hipMemcpyToSymbol(d_n_osc, &h_n_osc, sizeof(unsigned int)); //copy into device memory hipMemcpyToSymbol(d_n_per, &h_n_per, sizeof(unsigned int)); //copy into device memory } void free_resources(int n_resources, ...) { if (n_resources < 1) return; va_list param_list; va_start(param_list, n_resources); for (unsigned int i = 1; i <= n_resources; i++) { int res_type = va_arg(param_list, int); if (res_type == DEVICE_RES) hipFree(va_arg(param_list, double*)); else if (res_type == HOST_RES) free(va_arg(param_list, double*)); else { fprintf(stderr, "Invalid free_resources parameters pattern.\n"); break; } } va_end(param_list); }
b4d9a4bbbeb222731eada0af812092bb2cc3c047.cu
/* * multigpu.cu * * Created on: 01/dic/2014 * Author: Edoardo Mondoni */ #include "samples.h" #include "sim_parameters.h" #include "adm_matrix.h" #include "err.h" #include "multigpu.h" #include "init_osc.h" #include "init_output.h" #include "simulate.h" #include "output_file.h" #include "rng.h" #include <curand_kernel.h> #include <stdio.h> #include <stdarg.h> #include <stdlib.h> /*** GLOBAL VARIABLE DEFINITIONS ***/ unsigned int h_n_osc; unsigned int h_n_per; unsigned int h_noisy; /*** CONSTANT MEMORY DEFINITIONS ***/ __constant__ unsigned int d_n_osc; __constant__ unsigned int d_n_per; int main(int argc, char** argv) { double *d_matrix, *d_periods, *d_time, *d_alpha, *d_theta; curandState *d_states; /* PRELIMINARY PHASE * The arguments are validated and stored in the appropriate variables, including the samples * of the Gamma(t) and Vo(t) functions along with the time grid used for their sampling. * The program initializes the oscillators' properties and the simulation based on the constant * parameters #defined in macros and those derivable from the Gamma(t) and Vo(t) functions. * Then, the transadmittance matrix (d_matrix) is randomly generated and stored in device memory * for later use. */ process_arguments(argc, argv); initialize_rng(h_n_osc, 17021991, &d_states); allocate_samples(0); // copies Gamma(t) and Vo(t) samples (and instants) into dev initialize_simulation(&d_periods, &d_time, d_states); generate_matrix(&d_matrix, d_states); /* OUTPUT INITIALIZATION PHASE * The d_alpha and d_theta pointers are initialized (i.e. memory is allocated which will contain * the results of the simulation). Furthermore, the alpha_k(0) are stored in the appropriate * memory locations (they can be generated here since they are randomized). */ initialize_output(&d_alpha, &d_theta, d_periods, d_states); /* SIMULATION PHASE * This is the core of the program: it is where the simulation happens. */ perform_simulation(d_alpha, d_theta, d_time, d_matrix, d_periods, h_noisy, d_states); /* OUTPUT COPY PHASE * The results of the simulation are copied back to host memory and then to a file. 
*/ double *h_alpha, *h_theta, *h_time; h_alpha = (double *) malloc(sizeof(double) * h_n_osc * h_n_steps); h_theta = (double *) malloc(sizeof(double) * h_n_osc * h_n_steps); h_time = (double *) malloc(sizeof(double) * h_n_steps); cudaMemcpy(h_alpha, d_alpha, sizeof(double) * h_n_osc * h_n_steps, cudaMemcpyDeviceToHost); cudaMemcpy(h_theta, d_theta, sizeof(double) * h_n_osc * h_n_steps, cudaMemcpyDeviceToHost); cudaMemcpy(h_time, d_time, sizeof(double) * h_n_steps, cudaMemcpyDeviceToHost); matrix_to_file(h_alpha, ALPHA_FILE, h_n_steps, h_n_osc); matrix_to_file(h_theta, THETA_FILE, h_n_steps, h_n_osc); matrix_to_file(h_time, TIME_FILE, 1, h_n_steps); /* RESOURCE FREEING PHASE * */ cudaFree(d_states); free_resources(8, DEVICE_RES, d_matrix, DEVICE_RES, d_periods, DEVICE_RES, d_time, DEVICE_RES, d_alpha, DEVICE_RES, d_theta, HOST_RES, h_alpha, HOST_RES, h_theta, HOST_RES, h_time); } void process_arguments(int argc, char** argv) { if (argc < 4) { fprintf(stderr, "Usage: multigpu <n_oscillators> <n_periods> <noisy_flag>\n"); exit(NOT_ENOUGH_PARAMS); } int ret1 = sscanf(argv[1], "%u", &h_n_osc); //store the number of oscillators into the host variable int ret2 = sscanf(argv[2], "%u", &h_n_per); //store the number of periods into the host variable int ret3 = sscanf(argv[3], "%u", &h_noisy); //store the noisy flag into the host variable if (ret1 != 1 || ret2 != 1 || ret3 != 1) { fprintf(stderr, "An error has occurred while parsing the parameters. Please check their consistency.\n"); exit(WRONG_PARAMS); } else if (h_n_osc < 1) { fprintf(stderr, "Invalid number of oscillators. Valid values: >= 1\n"); exit(INVALID_N_OSC); } else if (h_n_per < 1) { fprintf(stderr, "Invalid number of periods. Valid values: >= 1\n"); exit(INVALID_N_PER); } else if (!(h_noisy == 0 || h_noisy == 1)) { fprintf(stderr, "Invalid noisy flag. Valid values: 0 (no noise), 1 (noise)\n"); exit(INVALID_NOISY_FLAG); } cudaMemcpyToSymbol(d_n_osc, &h_n_osc, sizeof(unsigned int)); //copy into device memory cudaMemcpyToSymbol(d_n_per, &h_n_per, sizeof(unsigned int)); //copy into device memory } void free_resources(int n_resources, ...) { if (n_resources < 1) return; va_list param_list; va_start(param_list, n_resources); for (unsigned int i = 1; i <= n_resources; i++) { int res_type = va_arg(param_list, int); if (res_type == DEVICE_RES) cudaFree(va_arg(param_list, double*)); else if (res_type == HOST_RES) free(va_arg(param_list, double*)); else { fprintf(stderr, "Invalid free_resources parameters pattern.\n"); break; } } va_end(param_list); }
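// The cudaMemcpy/cudaMemcpyToSymbol calls in the file above discard their return codes.
// A common pattern is a small checking macro; the name CHECK_CUDA below is illustrative
// and not part of this project.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                            \
    do {                                                            \
        cudaError_t err_ = (call);                                  \
        if (err_ != cudaSuccess) {                                  \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,      \
                    cudaGetErrorString(err_));                      \
            exit(EXIT_FAILURE);                                     \
        }                                                           \
    } while (0)

// Example: the same device-to-host copy performed in main(), but checked.
// CHECK_CUDA(cudaMemcpy(h_time, d_time, sizeof(double) * h_n_steps,
//                       cudaMemcpyDeviceToHost));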
444a9ca7d5611831300fee928d2c01711d64a22d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hipcub/hipcub.hpp> #include "caffe2/core/context.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/generate_proposals_op.h" #include "caffe2/operators/generate_proposals_op_util_boxes.h" // BBOX_XFORM_CLIP_DEFAULT #include "caffe2/operators/generate_proposals_op_util_nms.h" #include "caffe2/operators/generate_proposals_op_util_nms_gpu.h" using caffe2::utils::RotatedBox; namespace caffe2 { namespace { __global__ void GeneratePreNMSUprightBoxesKernel( const int* d_sorted_scores_keys, const int nboxes_to_generate, const float* d_bbox_deltas, const float4* d_anchors, const int H, const int W, const int A, const float feat_stride, const float min_size, const float* d_img_info_vec, const int num_images, const float bbox_xform_clip, const bool legacy_plus_one, float4* d_out_boxes, const int prenms_nboxes, // leading dimension of out_boxes float* d_inout_scores, char* d_boxes_keep_flags) { const int K = H * W; const int KA = K * A; CUDA_2D_KERNEL_LOOP(ibox, nboxes_to_generate, image_index, num_images) { // box_conv_index : # of the same box, but indexed in // the scores from the conv layer, of shape (A,H,W) // the num_images dimension was already removed // box_conv_index = a*K + h*W + w const int box_conv_index = d_sorted_scores_keys[image_index * KA + ibox]; // We want to decompose box_conv_index in (a,h,w) // such as box_conv_index = a*K + h*W + w // (avoiding modulos in the process) int remaining = box_conv_index; const int dA = K; // stride of A const int a = remaining / dA; remaining -= a * dA; const int dH = W; // stride of H const int h = remaining / dH; remaining -= h * dH; const int w = remaining; // dW = 1 // Loading the anchor a // float4 is a struct with float x,y,z,w const float4 anchor = d_anchors[a]; // x1,y1,x2,y2 :coordinates of anchor a, shifted for position (h,w) const float shift_w = feat_stride * w; float x1 = shift_w + anchor.x; float x2 = shift_w + anchor.z; const float shift_h = feat_stride * h; float y1 = shift_h + anchor.y; float y2 = shift_h + anchor.w; // TODO use fast math when possible // Deltas for that box // Deltas of shape (num_images,4*A,K) // We're going to compute 4 scattered reads // better than the alternative, ie transposing the complete deltas // array first int deltas_idx = image_index * (KA * 4) + a * 4 * K + h * W + w; const float dx = d_bbox_deltas[deltas_idx]; // Stride of K between each dimension deltas_idx += K; const float dy = d_bbox_deltas[deltas_idx]; deltas_idx += K; float dw = d_bbox_deltas[deltas_idx]; deltas_idx += K; float dh = d_bbox_deltas[deltas_idx]; // Upper bound on dw,dh dw = fmin(dw, bbox_xform_clip); dh = fmin(dh, bbox_xform_clip); // Applying the deltas float width = x2 - x1 + float(int(legacy_plus_one)); const float ctr_x = x1 + 0.5f * width; const float pred_ctr_x = ctr_x + width * dx; // TODO fuse madd const float pred_w = width * expf(dw); x1 = pred_ctr_x - 0.5f * pred_w; x2 = pred_ctr_x + 0.5f * pred_w - float(int(legacy_plus_one)); float height = y2 - y1 + float(int(legacy_plus_one)); const float ctr_y = y1 + 0.5f * height; const float pred_ctr_y = ctr_y + height * dy; const float pred_h = height * expf(dh); y1 = pred_ctr_y - 0.5f * pred_h; y2 = pred_ctr_y + 0.5f * pred_h - float(int(legacy_plus_one)); // Clipping box to image const float img_height = d_img_info_vec[3 * image_index + 0]; const float img_width = d_img_info_vec[3 * image_index + 1]; const float min_size_scaled = min_size * d_img_info_vec[3 * 
image_index + 2]; x1 = fmax(fmin(x1, img_width - float(int(legacy_plus_one))), 0.0f); y1 = fmax(fmin(y1, img_height - float(int(legacy_plus_one))), 0.0f); x2 = fmax(fmin(x2, img_width - float(int(legacy_plus_one))), 0.0f); y2 = fmax(fmin(y2, img_height - float(int(legacy_plus_one))), 0.0f); // Filter boxes // Removing boxes with one dim < min_size // (center of box is in image, because of previous step) width = x2 - x1 + float(int(legacy_plus_one)); // may have changed height = y2 - y1 + float(int(legacy_plus_one)); bool keep_box = fmin(width, height) >= min_size_scaled; // We are not deleting the box right now even if !keep_box // we want to keep the relative order of the elements stable // we'll do it in such a way later // d_boxes_keep_flags size: (num_images,prenms_nboxes) // d_out_boxes size: (num_images,prenms_nboxes) const int out_index = image_index * prenms_nboxes + ibox; d_boxes_keep_flags[out_index] = keep_box; d_out_boxes[out_index] = {x1, y1, x2, y2}; // d_inout_scores size: (num_images,KA) if (!keep_box) d_inout_scores[image_index * KA + ibox] = FLT_MIN; // for NMS } } __global__ void GeneratePreNMSRotatedBoxesKernel( const int* d_sorted_scores_keys, const int nboxes_to_generate, const float* d_bbox_deltas, const RotatedBox* d_anchors, const int H, const int W, const int A, const float feat_stride, const float min_size, const float* d_img_info_vec, const int num_images, const float bbox_xform_clip, const bool legacy_plus_one, const bool angle_bound_on, const int angle_bound_lo, const int angle_bound_hi, const bool clip_angle_thresh, RotatedBox* d_out_boxes, const int prenms_nboxes, // leading dimension of out_boxes float* d_inout_scores, char* d_boxes_keep_flags) { constexpr float PI = 3.14159265358979323846; const int K = H * W; const int KA = K * A; CUDA_2D_KERNEL_LOOP(ibox, nboxes_to_generate, image_index, num_images) { // box_conv_index : # of the same box, but indexed in // the scores from the conv layer, of shape (A,H,W) // the num_images dimension was already removed // box_conv_index = a*K + h*W + w const int box_conv_index = d_sorted_scores_keys[image_index * KA + ibox]; // We want to decompose box_conv_index in (a,h,w) // such as box_conv_index = a*K + h*W + w // (avoiding modulos in the process) int remaining = box_conv_index; const int dA = K; // stride of A const int a = remaining / dA; remaining -= a * dA; const int dH = W; // stride of H const int h = remaining / dH; remaining -= h * dH; const int w = remaining; // dW = 1 // Loading the anchor a and applying shifts. // RotatedBox in [ctr_x, ctr_y, w, h, angle] format. // Zero shift for width, height and angle. 
RotatedBox box = d_anchors[a]; box.x_ctr += feat_stride * w; // x_ctr shifted for w box.y_ctr += feat_stride * h; // y_ctr shifted for h // TODO use fast math when possible // Deltas for that box // Deltas of shape (num_images,5*A,K) // We're going to compute 5 scattered reads // better than the alternative, ie transposing the complete deltas // array first int deltas_idx = image_index * (KA * 5) + a * 5 * K + h * W + w; // Stride of K between each dimension RotatedBox delta; delta.x_ctr = d_bbox_deltas[deltas_idx + K * 0]; delta.y_ctr = d_bbox_deltas[deltas_idx + K * 1]; delta.w = d_bbox_deltas[deltas_idx + K * 2]; delta.h = d_bbox_deltas[deltas_idx + K * 3]; delta.a = d_bbox_deltas[deltas_idx + K * 4]; // Upper bound on dw,dh delta.w = fmin(delta.w, bbox_xform_clip); delta.h = fmin(delta.h, bbox_xform_clip); // Convert back to degrees delta.a *= 180.f / PI; // Applying the deltas box.x_ctr += delta.x_ctr * box.w; box.y_ctr += delta.y_ctr * box.h; box.w *= expf(delta.w); box.h *= expf(delta.h); box.a += delta.a; if (angle_bound_on) { // Normalize angle to be within [angle_bound_lo, angle_bound_hi]. // Deltas are guaranteed to be <= period / 2 while computing training // targets by bbox_transform_inv. const float period = angle_bound_hi - angle_bound_lo; // CAFFE_ENFORCE(period > 0 && period % 180 == 0); if (box.a < angle_bound_lo) { box.a += period; } else if (box.a > angle_bound_hi) { box.a -= period; } } // Clipping box to image. // Only clip boxes that are almost upright (with a tolerance of // clip_angle_thresh) for backward compatibility with horizontal boxes. const float img_height = d_img_info_vec[3 * image_index + 0]; const float img_width = d_img_info_vec[3 * image_index + 1]; const float min_size_scaled = min_size * d_img_info_vec[3 * image_index + 2]; if (fabs(box.a) <= clip_angle_thresh) { // Convert from [x_ctr, y_ctr, w, h] to [x1, y1, x2, y2] float x1 = box.x_ctr - (box.w - float(int(legacy_plus_one))) / 2.f; float y1 = box.y_ctr - (box.h - float(int(legacy_plus_one))) / 2.f; float x2 = x1 + box.w - float(int(legacy_plus_one)); float y2 = y1 + box.h - float(int(legacy_plus_one)); // Clip x1 = fmax(fmin(x1, img_width - float(int(legacy_plus_one))), 0.0f); y1 = fmax(fmin(y1, img_height - float(int(legacy_plus_one))), 0.0f); x2 = fmax(fmin(x2, img_width - float(int(legacy_plus_one))), 0.0f); y2 = fmax(fmin(y2, img_height - float(int(legacy_plus_one))), 0.0f); // Convert back to [x_ctr, y_ctr, w, h] box.x_ctr = (x1 + x2) / 2.f; box.y_ctr = (y1 + y2) / 2.f; box.w = x2 - x1 + float(int(legacy_plus_one)); box.h = y2 - y1 + float(int(legacy_plus_one)); } // Filter boxes. // Removing boxes with one dim < min_size or center outside the image. 
bool keep_box = (fmin(box.w, box.h) >= min_size_scaled) && (box.x_ctr < img_width) && (box.y_ctr < img_height); // We are not deleting the box right now even if !keep_box // we want to keep the relative order of the elements stable // we'll do it in such a way later // d_boxes_keep_flags size: (num_images,prenms_nboxes) // d_out_boxes size: (num_images,prenms_nboxes) const int out_index = image_index * prenms_nboxes + ibox; d_boxes_keep_flags[out_index] = keep_box; d_out_boxes[out_index] = box; // d_inout_scores size: (num_images,KA) if (!keep_box) { d_inout_scores[image_index * KA + ibox] = FLT_MIN; // for NMS } } } __global__ void WriteUprightBoxesOutput( const float4* d_image_boxes, const float* d_image_scores, const int* d_image_boxes_keep_list, const int nboxes, const int image_index, float* d_image_out_rois, float* d_image_out_rois_probs) { CUDA_1D_KERNEL_LOOP(i, nboxes) { const int ibox = d_image_boxes_keep_list[i]; const float4 box = d_image_boxes[ibox]; const float score = d_image_scores[ibox]; // Scattered memory accesses // postnms_nboxes is small anyway d_image_out_rois_probs[i] = score; const int base_idx = 5 * i; d_image_out_rois[base_idx + 0] = image_index; d_image_out_rois[base_idx + 1] = box.x; d_image_out_rois[base_idx + 2] = box.y; d_image_out_rois[base_idx + 3] = box.z; d_image_out_rois[base_idx + 4] = box.w; } } __global__ void WriteRotatedBoxesOutput( const RotatedBox* d_image_boxes, const float* d_image_scores, const int* d_image_boxes_keep_list, const int nboxes, const int image_index, float* d_image_out_rois, float* d_image_out_rois_probs) { CUDA_1D_KERNEL_LOOP(i, nboxes) { const int ibox = d_image_boxes_keep_list[i]; const RotatedBox box = d_image_boxes[ibox]; const float score = d_image_scores[ibox]; // Scattered memory accesses // postnms_nboxes is small anyway d_image_out_rois_probs[i] = score; const int base_idx = 6 * i; d_image_out_rois[base_idx + 0] = image_index; d_image_out_rois[base_idx + 1] = box.x_ctr; d_image_out_rois[base_idx + 2] = box.y_ctr; d_image_out_rois[base_idx + 3] = box.w; d_image_out_rois[base_idx + 4] = box.h; d_image_out_rois[base_idx + 5] = box.a; } } __global__ void InitializeDataKernel( const int num_images, const int KA, int* d_image_offsets, int* d_boxes_keys_iota) { CUDA_2D_KERNEL_LOOP(box_idx, KA, img_idx, num_images) { d_boxes_keys_iota[img_idx * KA + box_idx] = box_idx; // One 1D line sets the 1D data if (box_idx == 0) { d_image_offsets[img_idx] = KA * img_idx; // One thread sets the last+1 offset if (img_idx == 0) d_image_offsets[num_images] = KA * num_images; } } } } // namespace template <> bool GenerateProposalsOp<CUDAContext>::RunOnDevice() { const auto& scores = Input(0); const auto& bbox_deltas = Input(1); const auto& im_info_tensor = Input(2); const auto& anchors = Input(3); auto* out_rois = Output(0); auto* out_rois_probs = Output(1); CAFFE_ENFORCE_EQ(scores.ndim(), 4, scores.ndim()); CAFFE_ENFORCE(scores.template IsType<float>(), scores.meta().name()); const auto num_images = scores.dim(0); const auto A = scores.dim(1); const auto H = scores.dim(2); const auto W = scores.dim(3); const auto box_dim = anchors.dim(1); CAFFE_ENFORCE(box_dim == 4 || box_dim == 5); const int K = H * W; const int conv_layer_nboxes = K * A; // Getting data members ready // We'll sort the scores // we want to remember their original indexes, // ie their indexes in the tensor of shape (num_images,A,K) // from the conv layer // each row of d_conv_layer_indexes is at first initialized to 1..A*K dev_conv_layer_indexes_.Resize(num_images, 
conv_layer_nboxes); int* d_conv_layer_indexes = dev_conv_layer_indexes_.template mutable_data<int>(); // d_image_offset[i] = i*K*A for i from 1 to num_images+1 // Used by the segmented sort to only sort scores within one image dev_image_offset_.Resize(num_images + 1); int* d_image_offset = dev_image_offset_.template mutable_data<int>(); // The following calls to CUB primitives do nothing // (because the first arg is nullptr) // except setting cub_*_temp_storage_bytes size_t cub_sort_temp_storage_bytes = 0; float* flt_ptr = nullptr; int* int_ptr = nullptr; hipcub::DeviceSegmentedRadixSort::SortPairsDescending( nullptr, cub_sort_temp_storage_bytes, flt_ptr, flt_ptr, int_ptr, int_ptr, num_images * conv_layer_nboxes, num_images, int_ptr, int_ptr, 0, 8 * sizeof(float), // sort all bits context_.cuda_stream()); // Allocate temporary storage for CUB dev_cub_sort_buffer_.Resize(cub_sort_temp_storage_bytes); void* d_cub_sort_temp_storage = dev_cub_sort_buffer_.template mutable_data<char>(); size_t cub_select_temp_storage_bytes = 0; char* char_ptr = nullptr; hipcub::DeviceSelect::Flagged( nullptr, cub_select_temp_storage_bytes, flt_ptr, char_ptr, flt_ptr, int_ptr, K * A, context_.cuda_stream()); // Allocate temporary storage for CUB dev_cub_select_buffer_.Resize(cub_select_temp_storage_bytes); void* d_cub_select_temp_storage = dev_cub_select_buffer_.template mutable_data<char>(); // Initialize : // - each row of dev_conv_layer_indexes to 1..K*A // - each d_nboxes to 0 // - d_image_offset[i] = K*A*i for i 1..num_images+1 // 2D grid hipLaunchKernelGGL(( InitializeDataKernel), dim3((CAFFE_GET_BLOCKS(A * K), num_images)), dim3(CAFFE_CUDA_NUM_THREADS), // blockDim.y == 1 0, context_.cuda_stream(), num_images, conv_layer_nboxes, d_image_offset, d_conv_layer_indexes); // Sorting input scores dev_sorted_conv_layer_indexes_.Resize(num_images, conv_layer_nboxes); dev_sorted_scores_.Resize(num_images, conv_layer_nboxes); const float* d_in_scores = scores.data<float>(); int* d_sorted_conv_layer_indexes = dev_sorted_conv_layer_indexes_.template mutable_data<int>(); float* d_sorted_scores = dev_sorted_scores_.template mutable_data<float>(); ; hipcub::DeviceSegmentedRadixSort::SortPairsDescending( d_cub_sort_temp_storage, cub_sort_temp_storage_bytes, d_in_scores, d_sorted_scores, d_conv_layer_indexes, d_sorted_conv_layer_indexes, num_images * conv_layer_nboxes, num_images, d_image_offset, d_image_offset + 1, 0, 8 * sizeof(float), // sort all bits context_.cuda_stream()); // Keeping only the topN pre_nms const int nboxes_to_generate = ::min(conv_layer_nboxes, rpn_pre_nms_topN_); // Generating the boxes associated to the topN pre_nms scores dev_boxes_.Resize(num_images, box_dim * nboxes_to_generate); dev_boxes_keep_flags_.Resize(num_images, nboxes_to_generate); const float* d_bbox_deltas = bbox_deltas.data<float>(); const float* d_anchors = anchors.data<float>(); const float* d_im_info_vec = im_info_tensor.data<float>(); float* d_boxes = dev_boxes_.template mutable_data<float>(); ; char* d_boxes_keep_flags = dev_boxes_keep_flags_.template mutable_data<char>(); if (box_dim == 4) { hipLaunchKernelGGL(( GeneratePreNMSUprightBoxesKernel), dim3((CAFFE_GET_BLOCKS(nboxes_to_generate), num_images)), dim3(CAFFE_CUDA_NUM_THREADS), // blockDim.y == 1 0, context_.cuda_stream(), d_sorted_conv_layer_indexes, nboxes_to_generate, d_bbox_deltas, reinterpret_cast<const float4*>(d_anchors), H, W, A, feat_stride_, rpn_min_size_, d_im_info_vec, num_images, utils::BBOX_XFORM_CLIP_DEFAULT, legacy_plus_one_, 
reinterpret_cast<float4*>(d_boxes), nboxes_to_generate, d_sorted_scores, d_boxes_keep_flags); } else { hipLaunchKernelGGL(( GeneratePreNMSRotatedBoxesKernel), dim3((CAFFE_GET_BLOCKS(nboxes_to_generate), num_images)), dim3(CAFFE_CUDA_NUM_THREADS), // blockDim.y == 1 0, context_.cuda_stream(), d_sorted_conv_layer_indexes, nboxes_to_generate, d_bbox_deltas, reinterpret_cast<const RotatedBox*>(d_anchors), H, W, A, feat_stride_, rpn_min_size_, d_im_info_vec, num_images, utils::BBOX_XFORM_CLIP_DEFAULT, legacy_plus_one_, angle_bound_on_, angle_bound_lo_, angle_bound_hi_, clip_angle_thresh_, reinterpret_cast<RotatedBox*>(d_boxes), nboxes_to_generate, d_sorted_scores, d_boxes_keep_flags); } const int nboxes_generated = nboxes_to_generate; dev_image_prenms_boxes_.Resize(box_dim * nboxes_generated); float* d_image_prenms_boxes = dev_image_prenms_boxes_.template mutable_data<float>(); dev_image_prenms_scores_.Resize(nboxes_generated); float* d_image_prenms_scores = dev_image_prenms_scores_.template mutable_data<float>(); dev_image_boxes_keep_list_.Resize(nboxes_generated); int* d_image_boxes_keep_list = dev_image_boxes_keep_list_.template mutable_data<int>(); const int roi_cols = box_dim + 1; const int max_postnms_nboxes = ::min(nboxes_generated, rpn_post_nms_topN_); dev_postnms_rois_.Resize(roi_cols * num_images * max_postnms_nboxes); dev_postnms_rois_probs_.Resize(num_images * max_postnms_nboxes); float* d_postnms_rois = dev_postnms_rois_.template mutable_data<float>(); float* d_postnms_rois_probs = dev_postnms_rois_probs_.template mutable_data<float>(); dev_prenms_nboxes_.Resize(num_images); host_prenms_nboxes_.Resize(num_images); int* d_prenms_nboxes = dev_prenms_nboxes_.template mutable_data<int>(); int* h_prenms_nboxes = host_prenms_nboxes_.template mutable_data<int>(); int nrois_in_output = 0; for (int image_index = 0; image_index < num_images; ++image_index) { // Sub matrices for current image const float* d_image_boxes = &d_boxes[image_index * nboxes_generated * box_dim]; const float* d_image_sorted_scores = &d_sorted_scores[image_index * K * A]; char* d_image_boxes_keep_flags = &d_boxes_keep_flags[image_index * nboxes_generated]; float* d_image_postnms_rois = &d_postnms_rois[roi_cols * nrois_in_output]; float* d_image_postnms_rois_probs = &d_postnms_rois_probs[nrois_in_output]; // Moving valid boxes (ie the ones with d_boxes_keep_flags[ibox] == true) // to the output tensors if (box_dim == 4) { hipcub::DeviceSelect::Flagged( d_cub_select_temp_storage, cub_select_temp_storage_bytes, reinterpret_cast<const float4*>(d_image_boxes), d_image_boxes_keep_flags, reinterpret_cast<float4*>(d_image_prenms_boxes), d_prenms_nboxes, nboxes_generated, context_.cuda_stream()); } else { hipcub::DeviceSelect::Flagged( d_cub_select_temp_storage, cub_select_temp_storage_bytes, reinterpret_cast<const RotatedBox*>(d_image_boxes), d_image_boxes_keep_flags, reinterpret_cast<RotatedBox*>(d_image_prenms_boxes), d_prenms_nboxes, nboxes_generated, context_.cuda_stream()); } hipcub::DeviceSelect::Flagged( d_cub_select_temp_storage, cub_select_temp_storage_bytes, d_image_sorted_scores, d_image_boxes_keep_flags, d_image_prenms_scores, d_prenms_nboxes, nboxes_generated, context_.cuda_stream()); host_prenms_nboxes_.CopyFrom(dev_prenms_nboxes_); // We know prenms_boxes <= topN_prenms, because nboxes_generated <= // topN_prenms. 
Calling NMS on the generated boxes const int prenms_nboxes = *h_prenms_nboxes; int nkeep; utils::nms_gpu( d_image_prenms_boxes, prenms_nboxes, rpn_nms_thresh_, legacy_plus_one_, d_image_boxes_keep_list, &nkeep, dev_nms_mask_, host_nms_mask_, &context_, box_dim); // All operations done after previous sort were keeping the relative order // of the elements the elements are still sorted keep topN <=> truncate the // array const int postnms_nboxes = ::min(nkeep, rpn_post_nms_topN_); // Moving the out boxes to the output tensors, // adding the image_index dimension on the fly if (box_dim == 4) { hipLaunchKernelGGL(( WriteUprightBoxesOutput), dim3(CAFFE_GET_BLOCKS(postnms_nboxes)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), reinterpret_cast<const float4*>(d_image_prenms_boxes), d_image_prenms_scores, d_image_boxes_keep_list, postnms_nboxes, image_index, d_image_postnms_rois, d_image_postnms_rois_probs); } else { hipLaunchKernelGGL(( WriteRotatedBoxesOutput), dim3(CAFFE_GET_BLOCKS(postnms_nboxes)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), reinterpret_cast<const RotatedBox*>(d_image_prenms_boxes), d_image_prenms_scores, d_image_boxes_keep_list, postnms_nboxes, image_index, d_image_postnms_rois, d_image_postnms_rois_probs); } nrois_in_output += postnms_nboxes; } // Using a buffer because we cannot call ShrinkTo out_rois->Resize(nrois_in_output, roi_cols); out_rois_probs->Resize(nrois_in_output); float* d_out_rois = out_rois->template mutable_data<float>(); float* d_out_rois_probs = out_rois_probs->template mutable_data<float>(); CUDA_CHECK(hipMemcpyAsync( d_out_rois, d_postnms_rois, nrois_in_output * roi_cols * sizeof(float), hipMemcpyDeviceToDevice, context_.cuda_stream())); CUDA_CHECK(hipMemcpyAsync( d_out_rois_probs, d_postnms_rois_probs, nrois_in_output * sizeof(float), hipMemcpyDeviceToDevice, context_.cuda_stream())); return true; } REGISTER_CUDA_OPERATOR(GenerateProposals, GenerateProposalsOp<CUDAContext>); } // namespace caffe2 C10_REGISTER_CAFFE2_OPERATOR_CUDA( GenerateProposals, caffe2::GenerateProposalsOp<caffe2::CUDAContext>);
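// A host-side sketch of the upright box transform that GeneratePreNMSUprightBoxesKernel
// above applies per anchor, written out for a single box. legacy_plus_one and
// bbox_xform_clip mirror the kernel parameters; this is a reference for small checks,
// not part of the operator.
#include <algorithm>
#include <cmath>

static void transform_upright_box(
    float& x1, float& y1, float& x2, float& y2,
    float dx, float dy, float dw, float dh,
    float img_width, float img_height,
    float bbox_xform_clip, bool legacy_plus_one) {
  const float one = legacy_plus_one ? 1.f : 0.f;

  // Clamp the size deltas, as the kernel does, to avoid exp() overflow.
  dw = std::min(dw, bbox_xform_clip);
  dh = std::min(dh, bbox_xform_clip);

  const float w = x2 - x1 + one;
  const float h = y2 - y1 + one;
  const float ctr_x = x1 + 0.5f * w;
  const float ctr_y = y1 + 0.5f * h;

  const float pred_ctr_x = ctr_x + w * dx;
  const float pred_ctr_y = ctr_y + h * dy;
  const float pred_w = w * std::exp(dw);
  const float pred_h = h * std::exp(dh);

  x1 = pred_ctr_x - 0.5f * pred_w;
  x2 = pred_ctr_x + 0.5f * pred_w - one;
  y1 = pred_ctr_y - 0.5f * pred_h;
  y2 = pred_ctr_y + 0.5f * pred_h - one;

  // Clip to the image, matching the kernel.
  x1 = std::max(std::min(x1, img_width - one), 0.f);
  y1 = std::max(std::min(y1, img_height - one), 0.f);
  x2 = std::max(std::min(x2, img_width - one), 0.f);
  y2 = std::max(std::min(y2, img_height - one), 0.f);
}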
444a9ca7d5611831300fee928d2c01711d64a22d.cu
#include <cub/cub.cuh> #include "caffe2/core/context.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/generate_proposals_op.h" #include "caffe2/operators/generate_proposals_op_util_boxes.h" // BBOX_XFORM_CLIP_DEFAULT #include "caffe2/operators/generate_proposals_op_util_nms.h" #include "caffe2/operators/generate_proposals_op_util_nms_gpu.h" using caffe2::utils::RotatedBox; namespace caffe2 { namespace { __global__ void GeneratePreNMSUprightBoxesKernel( const int* d_sorted_scores_keys, const int nboxes_to_generate, const float* d_bbox_deltas, const float4* d_anchors, const int H, const int W, const int A, const float feat_stride, const float min_size, const float* d_img_info_vec, const int num_images, const float bbox_xform_clip, const bool legacy_plus_one, float4* d_out_boxes, const int prenms_nboxes, // leading dimension of out_boxes float* d_inout_scores, char* d_boxes_keep_flags) { const int K = H * W; const int KA = K * A; CUDA_2D_KERNEL_LOOP(ibox, nboxes_to_generate, image_index, num_images) { // box_conv_index : # of the same box, but indexed in // the scores from the conv layer, of shape (A,H,W) // the num_images dimension was already removed // box_conv_index = a*K + h*W + w const int box_conv_index = d_sorted_scores_keys[image_index * KA + ibox]; // We want to decompose box_conv_index in (a,h,w) // such as box_conv_index = a*K + h*W + w // (avoiding modulos in the process) int remaining = box_conv_index; const int dA = K; // stride of A const int a = remaining / dA; remaining -= a * dA; const int dH = W; // stride of H const int h = remaining / dH; remaining -= h * dH; const int w = remaining; // dW = 1 // Loading the anchor a // float4 is a struct with float x,y,z,w const float4 anchor = d_anchors[a]; // x1,y1,x2,y2 :coordinates of anchor a, shifted for position (h,w) const float shift_w = feat_stride * w; float x1 = shift_w + anchor.x; float x2 = shift_w + anchor.z; const float shift_h = feat_stride * h; float y1 = shift_h + anchor.y; float y2 = shift_h + anchor.w; // TODO use fast math when possible // Deltas for that box // Deltas of shape (num_images,4*A,K) // We're going to compute 4 scattered reads // better than the alternative, ie transposing the complete deltas // array first int deltas_idx = image_index * (KA * 4) + a * 4 * K + h * W + w; const float dx = d_bbox_deltas[deltas_idx]; // Stride of K between each dimension deltas_idx += K; const float dy = d_bbox_deltas[deltas_idx]; deltas_idx += K; float dw = d_bbox_deltas[deltas_idx]; deltas_idx += K; float dh = d_bbox_deltas[deltas_idx]; // Upper bound on dw,dh dw = fmin(dw, bbox_xform_clip); dh = fmin(dh, bbox_xform_clip); // Applying the deltas float width = x2 - x1 + float(int(legacy_plus_one)); const float ctr_x = x1 + 0.5f * width; const float pred_ctr_x = ctr_x + width * dx; // TODO fuse madd const float pred_w = width * expf(dw); x1 = pred_ctr_x - 0.5f * pred_w; x2 = pred_ctr_x + 0.5f * pred_w - float(int(legacy_plus_one)); float height = y2 - y1 + float(int(legacy_plus_one)); const float ctr_y = y1 + 0.5f * height; const float pred_ctr_y = ctr_y + height * dy; const float pred_h = height * expf(dh); y1 = pred_ctr_y - 0.5f * pred_h; y2 = pred_ctr_y + 0.5f * pred_h - float(int(legacy_plus_one)); // Clipping box to image const float img_height = d_img_info_vec[3 * image_index + 0]; const float img_width = d_img_info_vec[3 * image_index + 1]; const float min_size_scaled = min_size * d_img_info_vec[3 * image_index + 2]; x1 = fmax(fmin(x1, img_width - float(int(legacy_plus_one))), 0.0f); y1 = 
fmax(fmin(y1, img_height - float(int(legacy_plus_one))), 0.0f); x2 = fmax(fmin(x2, img_width - float(int(legacy_plus_one))), 0.0f); y2 = fmax(fmin(y2, img_height - float(int(legacy_plus_one))), 0.0f); // Filter boxes // Removing boxes with one dim < min_size // (center of box is in image, because of previous step) width = x2 - x1 + float(int(legacy_plus_one)); // may have changed height = y2 - y1 + float(int(legacy_plus_one)); bool keep_box = fmin(width, height) >= min_size_scaled; // We are not deleting the box right now even if !keep_box // we want to keep the relative order of the elements stable // we'll do it in such a way later // d_boxes_keep_flags size: (num_images,prenms_nboxes) // d_out_boxes size: (num_images,prenms_nboxes) const int out_index = image_index * prenms_nboxes + ibox; d_boxes_keep_flags[out_index] = keep_box; d_out_boxes[out_index] = {x1, y1, x2, y2}; // d_inout_scores size: (num_images,KA) if (!keep_box) d_inout_scores[image_index * KA + ibox] = FLT_MIN; // for NMS } } __global__ void GeneratePreNMSRotatedBoxesKernel( const int* d_sorted_scores_keys, const int nboxes_to_generate, const float* d_bbox_deltas, const RotatedBox* d_anchors, const int H, const int W, const int A, const float feat_stride, const float min_size, const float* d_img_info_vec, const int num_images, const float bbox_xform_clip, const bool legacy_plus_one, const bool angle_bound_on, const int angle_bound_lo, const int angle_bound_hi, const bool clip_angle_thresh, RotatedBox* d_out_boxes, const int prenms_nboxes, // leading dimension of out_boxes float* d_inout_scores, char* d_boxes_keep_flags) { constexpr float PI = 3.14159265358979323846; const int K = H * W; const int KA = K * A; CUDA_2D_KERNEL_LOOP(ibox, nboxes_to_generate, image_index, num_images) { // box_conv_index : # of the same box, but indexed in // the scores from the conv layer, of shape (A,H,W) // the num_images dimension was already removed // box_conv_index = a*K + h*W + w const int box_conv_index = d_sorted_scores_keys[image_index * KA + ibox]; // We want to decompose box_conv_index in (a,h,w) // such as box_conv_index = a*K + h*W + w // (avoiding modulos in the process) int remaining = box_conv_index; const int dA = K; // stride of A const int a = remaining / dA; remaining -= a * dA; const int dH = W; // stride of H const int h = remaining / dH; remaining -= h * dH; const int w = remaining; // dW = 1 // Loading the anchor a and applying shifts. // RotatedBox in [ctr_x, ctr_y, w, h, angle] format. // Zero shift for width, height and angle. 
RotatedBox box = d_anchors[a]; box.x_ctr += feat_stride * w; // x_ctr shifted for w box.y_ctr += feat_stride * h; // y_ctr shifted for h // TODO use fast math when possible // Deltas for that box // Deltas of shape (num_images,5*A,K) // We're going to compute 5 scattered reads // better than the alternative, ie transposing the complete deltas // array first int deltas_idx = image_index * (KA * 5) + a * 5 * K + h * W + w; // Stride of K between each dimension RotatedBox delta; delta.x_ctr = d_bbox_deltas[deltas_idx + K * 0]; delta.y_ctr = d_bbox_deltas[deltas_idx + K * 1]; delta.w = d_bbox_deltas[deltas_idx + K * 2]; delta.h = d_bbox_deltas[deltas_idx + K * 3]; delta.a = d_bbox_deltas[deltas_idx + K * 4]; // Upper bound on dw,dh delta.w = fmin(delta.w, bbox_xform_clip); delta.h = fmin(delta.h, bbox_xform_clip); // Convert back to degrees delta.a *= 180.f / PI; // Applying the deltas box.x_ctr += delta.x_ctr * box.w; box.y_ctr += delta.y_ctr * box.h; box.w *= expf(delta.w); box.h *= expf(delta.h); box.a += delta.a; if (angle_bound_on) { // Normalize angle to be within [angle_bound_lo, angle_bound_hi]. // Deltas are guaranteed to be <= period / 2 while computing training // targets by bbox_transform_inv. const float period = angle_bound_hi - angle_bound_lo; // CAFFE_ENFORCE(period > 0 && period % 180 == 0); if (box.a < angle_bound_lo) { box.a += period; } else if (box.a > angle_bound_hi) { box.a -= period; } } // Clipping box to image. // Only clip boxes that are almost upright (with a tolerance of // clip_angle_thresh) for backward compatibility with horizontal boxes. const float img_height = d_img_info_vec[3 * image_index + 0]; const float img_width = d_img_info_vec[3 * image_index + 1]; const float min_size_scaled = min_size * d_img_info_vec[3 * image_index + 2]; if (fabs(box.a) <= clip_angle_thresh) { // Convert from [x_ctr, y_ctr, w, h] to [x1, y1, x2, y2] float x1 = box.x_ctr - (box.w - float(int(legacy_plus_one))) / 2.f; float y1 = box.y_ctr - (box.h - float(int(legacy_plus_one))) / 2.f; float x2 = x1 + box.w - float(int(legacy_plus_one)); float y2 = y1 + box.h - float(int(legacy_plus_one)); // Clip x1 = fmax(fmin(x1, img_width - float(int(legacy_plus_one))), 0.0f); y1 = fmax(fmin(y1, img_height - float(int(legacy_plus_one))), 0.0f); x2 = fmax(fmin(x2, img_width - float(int(legacy_plus_one))), 0.0f); y2 = fmax(fmin(y2, img_height - float(int(legacy_plus_one))), 0.0f); // Convert back to [x_ctr, y_ctr, w, h] box.x_ctr = (x1 + x2) / 2.f; box.y_ctr = (y1 + y2) / 2.f; box.w = x2 - x1 + float(int(legacy_plus_one)); box.h = y2 - y1 + float(int(legacy_plus_one)); } // Filter boxes. // Removing boxes with one dim < min_size or center outside the image. 
bool keep_box = (fmin(box.w, box.h) >= min_size_scaled) && (box.x_ctr < img_width) && (box.y_ctr < img_height); // We are not deleting the box right now even if !keep_box // we want to keep the relative order of the elements stable // we'll do it in such a way later // d_boxes_keep_flags size: (num_images,prenms_nboxes) // d_out_boxes size: (num_images,prenms_nboxes) const int out_index = image_index * prenms_nboxes + ibox; d_boxes_keep_flags[out_index] = keep_box; d_out_boxes[out_index] = box; // d_inout_scores size: (num_images,KA) if (!keep_box) { d_inout_scores[image_index * KA + ibox] = FLT_MIN; // for NMS } } } __global__ void WriteUprightBoxesOutput( const float4* d_image_boxes, const float* d_image_scores, const int* d_image_boxes_keep_list, const int nboxes, const int image_index, float* d_image_out_rois, float* d_image_out_rois_probs) { CUDA_1D_KERNEL_LOOP(i, nboxes) { const int ibox = d_image_boxes_keep_list[i]; const float4 box = d_image_boxes[ibox]; const float score = d_image_scores[ibox]; // Scattered memory accesses // postnms_nboxes is small anyway d_image_out_rois_probs[i] = score; const int base_idx = 5 * i; d_image_out_rois[base_idx + 0] = image_index; d_image_out_rois[base_idx + 1] = box.x; d_image_out_rois[base_idx + 2] = box.y; d_image_out_rois[base_idx + 3] = box.z; d_image_out_rois[base_idx + 4] = box.w; } } __global__ void WriteRotatedBoxesOutput( const RotatedBox* d_image_boxes, const float* d_image_scores, const int* d_image_boxes_keep_list, const int nboxes, const int image_index, float* d_image_out_rois, float* d_image_out_rois_probs) { CUDA_1D_KERNEL_LOOP(i, nboxes) { const int ibox = d_image_boxes_keep_list[i]; const RotatedBox box = d_image_boxes[ibox]; const float score = d_image_scores[ibox]; // Scattered memory accesses // postnms_nboxes is small anyway d_image_out_rois_probs[i] = score; const int base_idx = 6 * i; d_image_out_rois[base_idx + 0] = image_index; d_image_out_rois[base_idx + 1] = box.x_ctr; d_image_out_rois[base_idx + 2] = box.y_ctr; d_image_out_rois[base_idx + 3] = box.w; d_image_out_rois[base_idx + 4] = box.h; d_image_out_rois[base_idx + 5] = box.a; } } __global__ void InitializeDataKernel( const int num_images, const int KA, int* d_image_offsets, int* d_boxes_keys_iota) { CUDA_2D_KERNEL_LOOP(box_idx, KA, img_idx, num_images) { d_boxes_keys_iota[img_idx * KA + box_idx] = box_idx; // One 1D line sets the 1D data if (box_idx == 0) { d_image_offsets[img_idx] = KA * img_idx; // One thread sets the last+1 offset if (img_idx == 0) d_image_offsets[num_images] = KA * num_images; } } } } // namespace template <> bool GenerateProposalsOp<CUDAContext>::RunOnDevice() { const auto& scores = Input(0); const auto& bbox_deltas = Input(1); const auto& im_info_tensor = Input(2); const auto& anchors = Input(3); auto* out_rois = Output(0); auto* out_rois_probs = Output(1); CAFFE_ENFORCE_EQ(scores.ndim(), 4, scores.ndim()); CAFFE_ENFORCE(scores.template IsType<float>(), scores.meta().name()); const auto num_images = scores.dim(0); const auto A = scores.dim(1); const auto H = scores.dim(2); const auto W = scores.dim(3); const auto box_dim = anchors.dim(1); CAFFE_ENFORCE(box_dim == 4 || box_dim == 5); const int K = H * W; const int conv_layer_nboxes = K * A; // Getting data members ready // We'll sort the scores // we want to remember their original indexes, // ie their indexes in the tensor of shape (num_images,A,K) // from the conv layer // each row of d_conv_layer_indexes is at first initialized to 1..A*K dev_conv_layer_indexes_.Resize(num_images, 
conv_layer_nboxes); int* d_conv_layer_indexes = dev_conv_layer_indexes_.template mutable_data<int>(); // d_image_offset[i] = i*K*A for i from 1 to num_images+1 // Used by the segmented sort to only sort scores within one image dev_image_offset_.Resize(num_images + 1); int* d_image_offset = dev_image_offset_.template mutable_data<int>(); // The following calls to CUB primitives do nothing // (because the first arg is nullptr) // except setting cub_*_temp_storage_bytes size_t cub_sort_temp_storage_bytes = 0; float* flt_ptr = nullptr; int* int_ptr = nullptr; cub::DeviceSegmentedRadixSort::SortPairsDescending( nullptr, cub_sort_temp_storage_bytes, flt_ptr, flt_ptr, int_ptr, int_ptr, num_images * conv_layer_nboxes, num_images, int_ptr, int_ptr, 0, 8 * sizeof(float), // sort all bits context_.cuda_stream()); // Allocate temporary storage for CUB dev_cub_sort_buffer_.Resize(cub_sort_temp_storage_bytes); void* d_cub_sort_temp_storage = dev_cub_sort_buffer_.template mutable_data<char>(); size_t cub_select_temp_storage_bytes = 0; char* char_ptr = nullptr; cub::DeviceSelect::Flagged( nullptr, cub_select_temp_storage_bytes, flt_ptr, char_ptr, flt_ptr, int_ptr, K * A, context_.cuda_stream()); // Allocate temporary storage for CUB dev_cub_select_buffer_.Resize(cub_select_temp_storage_bytes); void* d_cub_select_temp_storage = dev_cub_select_buffer_.template mutable_data<char>(); // Initialize : // - each row of dev_conv_layer_indexes to 1..K*A // - each d_nboxes to 0 // - d_image_offset[i] = K*A*i for i 1..num_images+1 // 2D grid InitializeDataKernel<<< (CAFFE_GET_BLOCKS(A * K), num_images), CAFFE_CUDA_NUM_THREADS, // blockDim.y == 1 0, context_.cuda_stream()>>>( num_images, conv_layer_nboxes, d_image_offset, d_conv_layer_indexes); // Sorting input scores dev_sorted_conv_layer_indexes_.Resize(num_images, conv_layer_nboxes); dev_sorted_scores_.Resize(num_images, conv_layer_nboxes); const float* d_in_scores = scores.data<float>(); int* d_sorted_conv_layer_indexes = dev_sorted_conv_layer_indexes_.template mutable_data<int>(); float* d_sorted_scores = dev_sorted_scores_.template mutable_data<float>(); ; cub::DeviceSegmentedRadixSort::SortPairsDescending( d_cub_sort_temp_storage, cub_sort_temp_storage_bytes, d_in_scores, d_sorted_scores, d_conv_layer_indexes, d_sorted_conv_layer_indexes, num_images * conv_layer_nboxes, num_images, d_image_offset, d_image_offset + 1, 0, 8 * sizeof(float), // sort all bits context_.cuda_stream()); // Keeping only the topN pre_nms const int nboxes_to_generate = std::min(conv_layer_nboxes, rpn_pre_nms_topN_); // Generating the boxes associated to the topN pre_nms scores dev_boxes_.Resize(num_images, box_dim * nboxes_to_generate); dev_boxes_keep_flags_.Resize(num_images, nboxes_to_generate); const float* d_bbox_deltas = bbox_deltas.data<float>(); const float* d_anchors = anchors.data<float>(); const float* d_im_info_vec = im_info_tensor.data<float>(); float* d_boxes = dev_boxes_.template mutable_data<float>(); ; char* d_boxes_keep_flags = dev_boxes_keep_flags_.template mutable_data<char>(); if (box_dim == 4) { GeneratePreNMSUprightBoxesKernel<<< (CAFFE_GET_BLOCKS(nboxes_to_generate), num_images), CAFFE_CUDA_NUM_THREADS, // blockDim.y == 1 0, context_.cuda_stream()>>>( d_sorted_conv_layer_indexes, nboxes_to_generate, d_bbox_deltas, reinterpret_cast<const float4*>(d_anchors), H, W, A, feat_stride_, rpn_min_size_, d_im_info_vec, num_images, utils::BBOX_XFORM_CLIP_DEFAULT, legacy_plus_one_, reinterpret_cast<float4*>(d_boxes), nboxes_to_generate, d_sorted_scores, d_boxes_keep_flags); } 
else { GeneratePreNMSRotatedBoxesKernel<<< (CAFFE_GET_BLOCKS(nboxes_to_generate), num_images), CAFFE_CUDA_NUM_THREADS, // blockDim.y == 1 0, context_.cuda_stream()>>>( d_sorted_conv_layer_indexes, nboxes_to_generate, d_bbox_deltas, reinterpret_cast<const RotatedBox*>(d_anchors), H, W, A, feat_stride_, rpn_min_size_, d_im_info_vec, num_images, utils::BBOX_XFORM_CLIP_DEFAULT, legacy_plus_one_, angle_bound_on_, angle_bound_lo_, angle_bound_hi_, clip_angle_thresh_, reinterpret_cast<RotatedBox*>(d_boxes), nboxes_to_generate, d_sorted_scores, d_boxes_keep_flags); } const int nboxes_generated = nboxes_to_generate; dev_image_prenms_boxes_.Resize(box_dim * nboxes_generated); float* d_image_prenms_boxes = dev_image_prenms_boxes_.template mutable_data<float>(); dev_image_prenms_scores_.Resize(nboxes_generated); float* d_image_prenms_scores = dev_image_prenms_scores_.template mutable_data<float>(); dev_image_boxes_keep_list_.Resize(nboxes_generated); int* d_image_boxes_keep_list = dev_image_boxes_keep_list_.template mutable_data<int>(); const int roi_cols = box_dim + 1; const int max_postnms_nboxes = std::min(nboxes_generated, rpn_post_nms_topN_); dev_postnms_rois_.Resize(roi_cols * num_images * max_postnms_nboxes); dev_postnms_rois_probs_.Resize(num_images * max_postnms_nboxes); float* d_postnms_rois = dev_postnms_rois_.template mutable_data<float>(); float* d_postnms_rois_probs = dev_postnms_rois_probs_.template mutable_data<float>(); dev_prenms_nboxes_.Resize(num_images); host_prenms_nboxes_.Resize(num_images); int* d_prenms_nboxes = dev_prenms_nboxes_.template mutable_data<int>(); int* h_prenms_nboxes = host_prenms_nboxes_.template mutable_data<int>(); int nrois_in_output = 0; for (int image_index = 0; image_index < num_images; ++image_index) { // Sub matrices for current image const float* d_image_boxes = &d_boxes[image_index * nboxes_generated * box_dim]; const float* d_image_sorted_scores = &d_sorted_scores[image_index * K * A]; char* d_image_boxes_keep_flags = &d_boxes_keep_flags[image_index * nboxes_generated]; float* d_image_postnms_rois = &d_postnms_rois[roi_cols * nrois_in_output]; float* d_image_postnms_rois_probs = &d_postnms_rois_probs[nrois_in_output]; // Moving valid boxes (ie the ones with d_boxes_keep_flags[ibox] == true) // to the output tensors if (box_dim == 4) { cub::DeviceSelect::Flagged( d_cub_select_temp_storage, cub_select_temp_storage_bytes, reinterpret_cast<const float4*>(d_image_boxes), d_image_boxes_keep_flags, reinterpret_cast<float4*>(d_image_prenms_boxes), d_prenms_nboxes, nboxes_generated, context_.cuda_stream()); } else { cub::DeviceSelect::Flagged( d_cub_select_temp_storage, cub_select_temp_storage_bytes, reinterpret_cast<const RotatedBox*>(d_image_boxes), d_image_boxes_keep_flags, reinterpret_cast<RotatedBox*>(d_image_prenms_boxes), d_prenms_nboxes, nboxes_generated, context_.cuda_stream()); } cub::DeviceSelect::Flagged( d_cub_select_temp_storage, cub_select_temp_storage_bytes, d_image_sorted_scores, d_image_boxes_keep_flags, d_image_prenms_scores, d_prenms_nboxes, nboxes_generated, context_.cuda_stream()); host_prenms_nboxes_.CopyFrom(dev_prenms_nboxes_); // We know prenms_boxes <= topN_prenms, because nboxes_generated <= // topN_prenms. 
Calling NMS on the generated boxes const int prenms_nboxes = *h_prenms_nboxes; int nkeep; utils::nms_gpu( d_image_prenms_boxes, prenms_nboxes, rpn_nms_thresh_, legacy_plus_one_, d_image_boxes_keep_list, &nkeep, dev_nms_mask_, host_nms_mask_, &context_, box_dim); // All operations done after previous sort were keeping the relative order // of the elements the elements are still sorted keep topN <=> truncate the // array const int postnms_nboxes = std::min(nkeep, rpn_post_nms_topN_); // Moving the out boxes to the output tensors, // adding the image_index dimension on the fly if (box_dim == 4) { WriteUprightBoxesOutput<<< CAFFE_GET_BLOCKS(postnms_nboxes), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( reinterpret_cast<const float4*>(d_image_prenms_boxes), d_image_prenms_scores, d_image_boxes_keep_list, postnms_nboxes, image_index, d_image_postnms_rois, d_image_postnms_rois_probs); } else { WriteRotatedBoxesOutput<<< CAFFE_GET_BLOCKS(postnms_nboxes), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( reinterpret_cast<const RotatedBox*>(d_image_prenms_boxes), d_image_prenms_scores, d_image_boxes_keep_list, postnms_nboxes, image_index, d_image_postnms_rois, d_image_postnms_rois_probs); } nrois_in_output += postnms_nboxes; } // Using a buffer because we cannot call ShrinkTo out_rois->Resize(nrois_in_output, roi_cols); out_rois_probs->Resize(nrois_in_output); float* d_out_rois = out_rois->template mutable_data<float>(); float* d_out_rois_probs = out_rois_probs->template mutable_data<float>(); CUDA_CHECK(cudaMemcpyAsync( d_out_rois, d_postnms_rois, nrois_in_output * roi_cols * sizeof(float), cudaMemcpyDeviceToDevice, context_.cuda_stream())); CUDA_CHECK(cudaMemcpyAsync( d_out_rois_probs, d_postnms_rois_probs, nrois_in_output * sizeof(float), cudaMemcpyDeviceToDevice, context_.cuda_stream())); return true; } REGISTER_CUDA_OPERATOR(GenerateProposals, GenerateProposalsOp<CUDAContext>); } // namespace caffe2 C10_REGISTER_CAFFE2_OPERATOR_CUDA( GenerateProposals, caffe2::GenerateProposalsOp<caffe2::CUDAContext>);
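// --- Illustrative sketch (not part of the operator above) ---
// The operator calls every CUB primitive twice: once with a null temp-storage
// pointer so CUB only reports the scratch size it needs, and once again with
// real scratch memory to do the work. A minimal, self-contained example of that
// two-pass pattern with cub::DeviceSelect::Flagged (hypothetical helper name):
#include <cub/cub.cuh>
#include <cuda_runtime.h>

void compact_flagged(const float* d_in, const char* d_flags, float* d_out,
                     int* d_num_selected, int num_items, cudaStream_t stream) {
  void*  d_temp_storage     = nullptr;
  size_t temp_storage_bytes = 0;
  // Pass 1: query the scratch size only (first argument is nullptr).
  cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes,
                             d_in, d_flags, d_out, d_num_selected,
                             num_items, stream);
  cudaMalloc(&d_temp_storage, temp_storage_bytes);
  // Pass 2: the identical call with allocated scratch performs the compaction.
  cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes,
                             d_in, d_flags, d_out, d_num_selected,
                             num_items, stream);
  cudaFree(d_temp_storage);
}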
a4f8bef6e8d15e8086d7a525ba5bfd2d472df2f8.hip
// !!! This is a file automatically generated by hipify!!!
#include "chronoGPU.hpp"
#include "common.hpp"

#include <iostream>

using namespace std;

ChronoGPU::ChronoGPU() : m_started( false ) {
  HANDLE_ERROR( hipEventCreate( &m_start ) );
  HANDLE_ERROR( hipEventCreate( &m_end ) );
}

ChronoGPU::~ChronoGPU() {
  if ( m_started ) {
    stop();
    std::cerr << "ChronoGPU::~ChronoGPU(): chrono wasn't turned off!" << std::endl;
  }
  HANDLE_ERROR( hipEventDestroy( m_start ) );
  HANDLE_ERROR( hipEventDestroy( m_end ) );
}

void ChronoGPU::start() {
  if ( !m_started ) {
    HANDLE_ERROR( hipEventRecord( m_start, 0 ) );
    m_started = true;
  }
  else
    std::cerr << "ChronoGPU::start(): chrono is already started!" << std::endl;
}

void ChronoGPU::stop() {
  if ( m_started ) {
    HANDLE_ERROR( hipEventRecord( m_end, 0 ) );
    HANDLE_ERROR( hipEventSynchronize( m_end ) );
    m_started = false;
  }
  else
    std::cerr << "ChronoGPU::stop(): chrono wasn't started!" << std::endl;
}

float ChronoGPU::elapsedTime() {
  float time = 0.f;
  HANDLE_ERROR( hipEventElapsedTime( &time, m_start, m_end ) );
  return time;
}
a4f8bef6e8d15e8086d7a525ba5bfd2d472df2f8.cu
#include "chronoGPU.hpp" #include "common.hpp" #include <iostream> using namespace std; ChronoGPU::ChronoGPU() : m_started( false ) { HANDLE_ERROR( cudaEventCreate( &m_start ) ); HANDLE_ERROR( cudaEventCreate( &m_end ) ); } ChronoGPU::~ChronoGPU() { if ( m_started ) { stop(); std::cerr << "ChronoGPU::~ChronoGPU(): hrono wasn't turned off!" << std::endl; } HANDLE_ERROR( cudaEventDestroy( m_start ) ); HANDLE_ERROR( cudaEventDestroy( m_end ) ); } void ChronoGPU::start() { if ( !m_started ) { HANDLE_ERROR( cudaEventRecord( m_start, 0 ) ); m_started = true; } else std::cerr << "ChronoGPU::start(): chrono is already started!" << std::endl; } void ChronoGPU::stop() { if ( m_started ) { HANDLE_ERROR( cudaEventRecord( m_end, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( m_end ) ); m_started = false; } else std::cerr << "ChronoGPU::stop(): chrono wasn't started!" << std::endl; } float ChronoGPU::elapsedTime() { float time = 0.f; HANDLE_ERROR( cudaEventElapsedTime( &time, m_start, m_end ) ); return time; }
45799c13f60d0c998566ebc5a96ae18ccabcc0c8.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <torch/extension.h> #include "cuda_utils.h" #include <vector> template <typename scalar_t> __global__ void chamfer_dist_kernel(int batch_size, int n, const scalar_t* __restrict__ xyz1, int m, const scalar_t* __restrict__ xyz2, scalar_t* __restrict__ dist, int* indexes) { const int batch = 512; __shared__ scalar_t buf[batch * 3]; for (int i = blockIdx.x; i < batch_size; i += gridDim.x) { for (int k2 = 0; k2 < m; k2 += batch) { int end_k = min(m, k2 + batch) - k2; for (int j = threadIdx.x; j < end_k * 3; j += blockDim.x) { buf[j] = xyz2[(i * m + k2) * 3 + j]; } __syncthreads(); for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) { scalar_t x1 = xyz1[(i * n + j) * 3 + 0]; scalar_t y1 = xyz1[(i * n + j) * 3 + 1]; scalar_t z1 = xyz1[(i * n + j) * 3 + 2]; scalar_t best_dist = 0; int best_dist_index = 0; int end_ka = end_k - (end_k & 3); if (end_ka == batch) { for (int k = 0; k < batch; k += 4) { { scalar_t x2 = buf[k * 3 + 0] - x1; scalar_t y2 = buf[k * 3 + 1] - y1; scalar_t z2 = buf[k * 3 + 2] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || dist < best_dist) { best_dist = dist; best_dist_index = k + k2; } } { scalar_t x2 = buf[k * 3 + 3] - x1; scalar_t y2 = buf[k * 3 + 4] - y1; scalar_t z2 = buf[k * 3 + 5] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 1; } } { scalar_t x2 = buf[k * 3 + 6] - x1; scalar_t y2 = buf[k * 3 + 7] - y1; scalar_t z2 = buf[k * 3 + 8] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 2; } } { scalar_t x2 = buf[k * 3 + 9] - x1; scalar_t y2 = buf[k * 3 + 10] - y1; scalar_t z2 = buf[k * 3 + 11] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 3; } } } } else { for (int k = 0; k < end_ka; k += 4) { { scalar_t x2 = buf[k * 3 + 0] - x1; scalar_t y2 = buf[k * 3 + 1] - y1; scalar_t z2 = buf[k * 3 + 2] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || dist < best_dist) { best_dist = dist; best_dist_index = k + k2; } } { scalar_t x2 = buf[k * 3 + 3] - x1; scalar_t y2 = buf[k * 3 + 4] - y1; scalar_t z2 = buf[k * 3 + 5] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 1; } } { scalar_t x2 = buf[k * 3 + 6] - x1; scalar_t y2 = buf[k * 3 + 7] - y1; scalar_t z2 = buf[k * 3 + 8] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 2; } } { scalar_t x2 = buf[k * 3 + 9] - x1; scalar_t y2 = buf[k * 3 + 10] - y1; scalar_t z2 = buf[k * 3 + 11] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 3; } } } } for (int k = end_ka; k < end_k; k++) { scalar_t x2 = buf[k * 3 + 0] - x1; scalar_t y2 = buf[k * 3 + 1] - y1; scalar_t z2 = buf[k * 3 + 2] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || dist < best_dist) { best_dist = dist; best_dist_index = k + k2; } } if (k2 == 0 || dist[(i * n + j)] > best_dist) { dist[(i * n + j)] = best_dist; indexes[(i * n + j)] = best_dist_index; } } __syncthreads(); } } } std::vector<torch::Tensor> chamfer_dist_kernel_wrapper(torch::Tensor xyz1, torch::Tensor xyz2) { const int batch_size = xyz1.size(0); const int n = xyz1.size(1); // num_points 
point cloud A const int m = xyz2.size(1); // num_points point cloud B torch::Tensor dist1 = torch::zeros({batch_size, n}, torch::CUDA(xyz1.scalar_type())); torch::Tensor dist2 = torch::zeros({batch_size, m}, torch::CUDA(xyz1.scalar_type())); torch::Tensor idx1 = torch::zeros({batch_size, n}, torch::CUDA(torch::kInt)); torch::Tensor idx2 = torch::zeros({batch_size, m}, torch::CUDA(torch::kInt)); AT_DISPATCH_FLOATING_TYPES( xyz1.scalar_type(), "chamfer_dist_cuda", ([&] { hipLaunchKernelGGL(( chamfer_dist_kernel<scalar_t>), dim3(dim3(32, 16, 1)), dim3(512), 0, 0, batch_size, n, xyz1.data_ptr<scalar_t>(), m, xyz2.data_ptr<scalar_t>(), dist1.data_ptr<scalar_t>(), idx1.data_ptr<int>()); hipLaunchKernelGGL(( chamfer_dist_kernel<scalar_t>), dim3(dim3(32, 16, 1)), dim3(512), 0, 0, batch_size, m, xyz2.data_ptr<scalar_t>(), n, xyz1.data_ptr<scalar_t>(), dist2.data_ptr<scalar_t>(), idx2.data_ptr<int>()); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("Error in chamfer_dist_kernel_wrapper: %s\n", hipGetErrorString(err)); } return {dist1, dist2, idx1, idx2}; } template <typename scalar_t> __global__ void chamfer_dist_grad_kernel(int b, int n, const scalar_t* __restrict__ xyz1, int m, const scalar_t* __restrict__ xyz2, const scalar_t* __restrict__ grad_dist1, const int* idx1, scalar_t* __restrict__ grad_xyz1, scalar_t* __restrict__ grad_xyz2) { for (int i = blockIdx.x; i < b; i += gridDim.x) { for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) { scalar_t x1 = xyz1[(i * n + j) * 3 + 0]; scalar_t y1 = xyz1[(i * n + j) * 3 + 1]; scalar_t z1 = xyz1[(i * n + j) * 3 + 2]; int j2 = idx1[i * n + j]; scalar_t x2 = xyz2[(i * m + j2) * 3 + 0]; scalar_t y2 = xyz2[(i * m + j2) * 3 + 1]; scalar_t z2 = xyz2[(i * m + j2) * 3 + 2]; scalar_t g = grad_dist1[i * n + j] * 2; atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 0]), g * (x1 - x2)); atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 1]), g * (y1 - y2)); atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 2]), g * (z1 - z2)); atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 0]), -(g * (x1 - x2))); atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 1]), -(g * (y1 - y2))); atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 2]), -(g * (z1 - z2))); } } } std::vector<torch::Tensor> chamfer_dist_grad_kernel_wrapper(torch::Tensor xyz1, torch::Tensor xyz2, torch::Tensor idx1, torch::Tensor idx2, torch::Tensor grad_dist1, torch::Tensor grad_dist2) { const int batch_size = xyz1.size(0); const int n = xyz1.size(1); // num_points point cloud A const int m = xyz2.size(1); // num_points point cloud B torch::Tensor grad_xyz1 = torch::zeros_like(xyz1); torch::Tensor grad_xyz2 = torch::zeros_like(xyz2); AT_DISPATCH_FLOATING_TYPES( xyz1.scalar_type(), "chamfer_dist_grad_cuda", ([&] { hipLaunchKernelGGL(( chamfer_dist_grad_kernel<scalar_t>), dim3(dim3(1, 16, 1)), dim3(256), 0, 0, batch_size, n, xyz1.data_ptr<scalar_t>(), m, xyz2.data_ptr<scalar_t>(), grad_dist1.data_ptr<scalar_t>(), idx1.data_ptr<int>(), grad_xyz1.data_ptr<scalar_t>(), grad_xyz2.data_ptr<scalar_t>()); hipLaunchKernelGGL(( chamfer_dist_grad_kernel<scalar_t>), dim3(dim3(1, 16, 1)), dim3(256), 0, 0, batch_size, m, xyz2.data_ptr<scalar_t>(), n, xyz1.data_ptr<scalar_t>(), grad_dist2.data_ptr<scalar_t>(), idx2.data_ptr<int>(), grad_xyz2.data_ptr<scalar_t>(), grad_xyz1.data_ptr<scalar_t>()); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("Error in chamfer_dist_grad_kernel_wrapper: %s\n", hipGetErrorString(err)); } return {grad_xyz1, grad_xyz2}; }
45799c13f60d0c998566ebc5a96ae18ccabcc0c8.cu
#include <cuda.h> #include <cuda_runtime.h> #include <torch/extension.h> #include "cuda_utils.h" #include <vector> template <typename scalar_t> __global__ void chamfer_dist_kernel(int batch_size, int n, const scalar_t* __restrict__ xyz1, int m, const scalar_t* __restrict__ xyz2, scalar_t* __restrict__ dist, int* indexes) { const int batch = 512; __shared__ scalar_t buf[batch * 3]; for (int i = blockIdx.x; i < batch_size; i += gridDim.x) { for (int k2 = 0; k2 < m; k2 += batch) { int end_k = min(m, k2 + batch) - k2; for (int j = threadIdx.x; j < end_k * 3; j += blockDim.x) { buf[j] = xyz2[(i * m + k2) * 3 + j]; } __syncthreads(); for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) { scalar_t x1 = xyz1[(i * n + j) * 3 + 0]; scalar_t y1 = xyz1[(i * n + j) * 3 + 1]; scalar_t z1 = xyz1[(i * n + j) * 3 + 2]; scalar_t best_dist = 0; int best_dist_index = 0; int end_ka = end_k - (end_k & 3); if (end_ka == batch) { for (int k = 0; k < batch; k += 4) { { scalar_t x2 = buf[k * 3 + 0] - x1; scalar_t y2 = buf[k * 3 + 1] - y1; scalar_t z2 = buf[k * 3 + 2] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || dist < best_dist) { best_dist = dist; best_dist_index = k + k2; } } { scalar_t x2 = buf[k * 3 + 3] - x1; scalar_t y2 = buf[k * 3 + 4] - y1; scalar_t z2 = buf[k * 3 + 5] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 1; } } { scalar_t x2 = buf[k * 3 + 6] - x1; scalar_t y2 = buf[k * 3 + 7] - y1; scalar_t z2 = buf[k * 3 + 8] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 2; } } { scalar_t x2 = buf[k * 3 + 9] - x1; scalar_t y2 = buf[k * 3 + 10] - y1; scalar_t z2 = buf[k * 3 + 11] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 3; } } } } else { for (int k = 0; k < end_ka; k += 4) { { scalar_t x2 = buf[k * 3 + 0] - x1; scalar_t y2 = buf[k * 3 + 1] - y1; scalar_t z2 = buf[k * 3 + 2] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || dist < best_dist) { best_dist = dist; best_dist_index = k + k2; } } { scalar_t x2 = buf[k * 3 + 3] - x1; scalar_t y2 = buf[k * 3 + 4] - y1; scalar_t z2 = buf[k * 3 + 5] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 1; } } { scalar_t x2 = buf[k * 3 + 6] - x1; scalar_t y2 = buf[k * 3 + 7] - y1; scalar_t z2 = buf[k * 3 + 8] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 2; } } { scalar_t x2 = buf[k * 3 + 9] - x1; scalar_t y2 = buf[k * 3 + 10] - y1; scalar_t z2 = buf[k * 3 + 11] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 3; } } } } for (int k = end_ka; k < end_k; k++) { scalar_t x2 = buf[k * 3 + 0] - x1; scalar_t y2 = buf[k * 3 + 1] - y1; scalar_t z2 = buf[k * 3 + 2] - z1; scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || dist < best_dist) { best_dist = dist; best_dist_index = k + k2; } } if (k2 == 0 || dist[(i * n + j)] > best_dist) { dist[(i * n + j)] = best_dist; indexes[(i * n + j)] = best_dist_index; } } __syncthreads(); } } } std::vector<torch::Tensor> chamfer_dist_kernel_wrapper(torch::Tensor xyz1, torch::Tensor xyz2) { const int batch_size = xyz1.size(0); const int n = xyz1.size(1); // num_points point cloud A const int m = xyz2.size(1); // num_points point cloud B 
torch::Tensor dist1 = torch::zeros({batch_size, n}, torch::CUDA(xyz1.scalar_type())); torch::Tensor dist2 = torch::zeros({batch_size, m}, torch::CUDA(xyz1.scalar_type())); torch::Tensor idx1 = torch::zeros({batch_size, n}, torch::CUDA(torch::kInt)); torch::Tensor idx2 = torch::zeros({batch_size, m}, torch::CUDA(torch::kInt)); AT_DISPATCH_FLOATING_TYPES( xyz1.scalar_type(), "chamfer_dist_cuda", ([&] { chamfer_dist_kernel<scalar_t><<<dim3(32, 16, 1), 512>>>( batch_size, n, xyz1.data_ptr<scalar_t>(), m, xyz2.data_ptr<scalar_t>(), dist1.data_ptr<scalar_t>(), idx1.data_ptr<int>()); chamfer_dist_kernel<scalar_t><<<dim3(32, 16, 1), 512>>>( batch_size, m, xyz2.data_ptr<scalar_t>(), n, xyz1.data_ptr<scalar_t>(), dist2.data_ptr<scalar_t>(), idx2.data_ptr<int>()); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error in chamfer_dist_kernel_wrapper: %s\n", cudaGetErrorString(err)); } return {dist1, dist2, idx1, idx2}; } template <typename scalar_t> __global__ void chamfer_dist_grad_kernel(int b, int n, const scalar_t* __restrict__ xyz1, int m, const scalar_t* __restrict__ xyz2, const scalar_t* __restrict__ grad_dist1, const int* idx1, scalar_t* __restrict__ grad_xyz1, scalar_t* __restrict__ grad_xyz2) { for (int i = blockIdx.x; i < b; i += gridDim.x) { for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) { scalar_t x1 = xyz1[(i * n + j) * 3 + 0]; scalar_t y1 = xyz1[(i * n + j) * 3 + 1]; scalar_t z1 = xyz1[(i * n + j) * 3 + 2]; int j2 = idx1[i * n + j]; scalar_t x2 = xyz2[(i * m + j2) * 3 + 0]; scalar_t y2 = xyz2[(i * m + j2) * 3 + 1]; scalar_t z2 = xyz2[(i * m + j2) * 3 + 2]; scalar_t g = grad_dist1[i * n + j] * 2; atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 0]), g * (x1 - x2)); atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 1]), g * (y1 - y2)); atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 2]), g * (z1 - z2)); atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 0]), -(g * (x1 - x2))); atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 1]), -(g * (y1 - y2))); atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 2]), -(g * (z1 - z2))); } } } std::vector<torch::Tensor> chamfer_dist_grad_kernel_wrapper(torch::Tensor xyz1, torch::Tensor xyz2, torch::Tensor idx1, torch::Tensor idx2, torch::Tensor grad_dist1, torch::Tensor grad_dist2) { const int batch_size = xyz1.size(0); const int n = xyz1.size(1); // num_points point cloud A const int m = xyz2.size(1); // num_points point cloud B torch::Tensor grad_xyz1 = torch::zeros_like(xyz1); torch::Tensor grad_xyz2 = torch::zeros_like(xyz2); AT_DISPATCH_FLOATING_TYPES( xyz1.scalar_type(), "chamfer_dist_grad_cuda", ([&] { chamfer_dist_grad_kernel<scalar_t><<<dim3(1, 16, 1), 256>>>( batch_size, n, xyz1.data_ptr<scalar_t>(), m, xyz2.data_ptr<scalar_t>(), grad_dist1.data_ptr<scalar_t>(), idx1.data_ptr<int>(), grad_xyz1.data_ptr<scalar_t>(), grad_xyz2.data_ptr<scalar_t>()); chamfer_dist_grad_kernel<scalar_t><<<dim3(1, 16, 1), 256>>>( batch_size, m, xyz2.data_ptr<scalar_t>(), n, xyz1.data_ptr<scalar_t>(), grad_dist2.data_ptr<scalar_t>(), idx2.data_ptr<int>(), grad_xyz2.data_ptr<scalar_t>(), grad_xyz1.data_ptr<scalar_t>()); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error in chamfer_dist_grad_kernel_wrapper: %s\n", cudaGetErrorString(err)); } return {grad_xyz1, grad_xyz2}; }
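// --- Illustrative sketch (an assumption: the binding file is not part of this pair) ---
// The two wrappers return {dist1, dist2, idx1, idx2} and {grad_xyz1, grad_xyz2};
// a typical PyTorch extension would expose them to Python roughly like this:
#include <torch/extension.h>
#include <vector>

std::vector<torch::Tensor> chamfer_dist_kernel_wrapper(torch::Tensor xyz1, torch::Tensor xyz2);
std::vector<torch::Tensor> chamfer_dist_grad_kernel_wrapper(torch::Tensor xyz1, torch::Tensor xyz2,
                                                            torch::Tensor idx1, torch::Tensor idx2,
                                                            torch::Tensor grad_dist1, torch::Tensor grad_dist2);

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &chamfer_dist_kernel_wrapper, "Chamfer distance forward (CUDA)");
  m.def("backward", &chamfer_dist_grad_kernel_wrapper, "Chamfer distance backward (CUDA)");
}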
83bbe56d545f179d70f21df4ee3930b5ea940fbd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <ctime> #include <climits> #include <helper_cuda.h> #define R 16 #define L 8 //Don't increase L beyond 8 #define MAX_GRID_SIZE 2147483646 #define BLOCK_DIM_SORT 192 #define NUMBER_OF_GROUPS_PER_BLOCK 12 #define NUM_RADICES (1<<L) #define NUM_BLOCKS 16//ceil((float)size/BLOCK_DIM_SORT) //If the size is 3072 = 192*16 #define ARRAY_SIZE 3072 #define NUM_GROUPS NUMBER_OF_GROUPS_PER_BLOCK * NUM_BLOCKS #define NUM_RADICES_PER_BLOCK 16 // NUM_RADICES/NUM_BLOCK = 256/8 = 32 // int cellID[ARRAY_SIZE]; // int objectID[ARRAY_SIZE]; __device__ __host__ int getAddress(int a, int b, int c){ //d1: radices //d2: thread blocks //d3: thread groups //a: radix, b: thread block, c: thread group // int d1 = NUM_RADICES; int d2 = NUM_BLOCKS; int d3 = NUMBER_OF_GROUPS_PER_BLOCK; return (d2*d3*a + d3*b + c); //http://stackoverflow.com/questions/789913/array-offset-calculations-in-multi-dimensional-array-column-vs-row-major } void __global__ phase_1_kernel(int *d_cellID, int *d_objectID, int size, int partition_size, int Num_Elements_Per_Group, int pass, int * d_counters){ __shared__ int shared_counters[NUMBER_OF_GROUPS_PER_BLOCK][NUM_RADICES]; if(threadIdx.x%R==0){ for (int i = 0; i < NUM_RADICES; ++i){ shared_counters[threadIdx.x/R][i] = 0; } } __syncthreads(); int firstCellID =(blockIdx.x*NUMBER_OF_GROUPS_PER_BLOCK + threadIdx.x/R)*Num_Elements_Per_Group + threadIdx.x%R; unsigned int mask = 0; for (int i = 0; i < L; ++i) mask = mask<<1 | 1; mask = mask << (pass*L); for (int i = firstCellID; i < firstCellID + R*partition_size; i+=R) { unsigned int masked_number = d_cellID[i] & (mask); masked_number = masked_number >> (L*pass); atomicInc((unsigned int*)&shared_counters[threadIdx.x/R][masked_number], INT_MAX); } __syncthreads(); if(threadIdx.x%R == 0){ for (int i = 0; i < NUM_RADICES; ++i) { d_counters[getAddress(i,blockIdx.x,threadIdx.x/R)] = shared_counters[threadIdx.x/R][i]; } } } void launch_kernel_phase_1(int* d_cellID, int * d_objectID, int size, int pass, int* d_counters){ //keep launch config of all kernels same dim3 grid(ceil((float)size/BLOCK_DIM_SORT)); dim3 block(BLOCK_DIM_SORT,1); int partition_size = ceil((float)size/MAX_GRID_SIZE); int Num_Elements_Per_Group = R*partition_size; hipLaunchKernelGGL(( phase_1_kernel) , dim3(grid), dim3(block), 0, 0, d_cellID, d_objectID, size, partition_size, Num_Elements_Per_Group, pass, d_counters); } void __global__ phase_2_kernel(int *d_cellID, int *d_objectID, int size, int partition_size, int Num_Elements_Per_Group, int pass, int * d_counters, int *d_partial_prefix_sums_per_radix){ int lowestRadixForBlock = NUM_RADICES_PER_BLOCK*blockIdx.x ; int highestRadixForBlock = lowestRadixForBlock + NUM_RADICES_PER_BLOCK - 1; //Both radices are included in the radix range for this group __shared__ int shared_counters[NUM_RADICES_PER_BLOCK][NUM_GROUPS]; if(threadIdx.x < NUM_RADICES_PER_BLOCK) { int i = threadIdx.x; for(int j = 0; j < NUM_GROUPS; j++) { shared_counters[i][j] = 0; } } __syncthreads(); if(threadIdx.x <= highestRadixForBlock - lowestRadixForBlock) { int i = threadIdx.x + lowestRadixForBlock; for (int j = 0; j < NUM_GROUPS; ++j) { shared_counters[i - lowestRadixForBlock][j] = d_counters[getAddress(i,j/NUMBER_OF_GROUPS_PER_BLOCK, j%NUMBER_OF_GROUPS_PER_BLOCK)]; } } __syncthreads(); //Prefix sum naive implementation for shared memory if(threadIdx.x <= highestRadixForBlock - lowestRadixForBlock) { int i = threadIdx.x + lowestRadixForBlock; for(int j = 
1;j<NUM_GROUPS; j++){ shared_counters[i - lowestRadixForBlock][j] = shared_counters[i - lowestRadixForBlock][j] + shared_counters[i - lowestRadixForBlock][j-1]; } } __syncthreads(); if(threadIdx.x <= highestRadixForBlock - lowestRadixForBlock) { int i = threadIdx.x + lowestRadixForBlock; for (int j = 0; j < NUM_GROUPS; ++j) { d_counters[getAddress(i,j/NUMBER_OF_GROUPS_PER_BLOCK, j%NUMBER_OF_GROUPS_PER_BLOCK)] = shared_counters[i - lowestRadixForBlock][j]; } d_partial_prefix_sums_per_radix[i] = shared_counters[i - lowestRadixForBlock][NUM_GROUPS-1]; } } void launch_kernel_phase_2(int* d_cellID, int * d_objectID, int size, int pass, int* d_counters, int* d_partial_prefix_sums_per_radix){ //keep launch config of all kernels same dim3 grid(NUM_RADICES/NUM_RADICES_PER_BLOCK); dim3 block(BLOCK_DIM_SORT,1); int partition_size = ceil((float)size/MAX_GRID_SIZE); int Num_Elements_Per_Group = R*partition_size; hipLaunchKernelGGL(( phase_2_kernel) , dim3(grid), dim3(block), 0, 0, d_cellID, d_objectID, size, partition_size, Num_Elements_Per_Group, pass, d_counters, d_partial_prefix_sums_per_radix); } void __global__ phase_3_kernel(int *d_cellID, int *d_objectID, int size, int partition_size, int Num_Elements_Per_Group, int pass, int * d_counters, int* d_partial_prefix_sums_per_radix, int * d_sorted_cellID, int* d_sorted_objectID){ __shared__ int shared_parallel_prefix[NUM_RADICES]; __shared__ int shared_counters[NUMBER_OF_GROUPS_PER_BLOCK][NUM_RADICES]; if(threadIdx.x%R==0){ for (int i = 0; i < NUM_RADICES; ++i){ shared_counters[threadIdx.x/R][i] = 0; } } __syncthreads(); if(threadIdx.x == 0) { for (int i = 0; i < NUM_RADICES; ++i) { shared_parallel_prefix[i] = d_partial_prefix_sums_per_radix[i]; } } __syncthreads(); if(threadIdx.x == 0){ for (int i = 1; i < NUM_RADICES; ++i) { shared_parallel_prefix[i] = shared_parallel_prefix[i-1] + shared_parallel_prefix[i]; } } __syncthreads(); if(threadIdx.x%R == 0){ for (int i = 0; i < NUM_RADICES; ++i) { shared_counters[threadIdx.x/R][i] = d_counters[getAddress(i,blockIdx.x,threadIdx.x/R)]; if(i>0) shared_counters[threadIdx.x/R][i] += shared_parallel_prefix[i-1]; } } __syncthreads(); int firstCellID =(blockIdx.x*NUMBER_OF_GROUPS_PER_BLOCK + threadIdx.x/R)*Num_Elements_Per_Group + threadIdx.x%R; unsigned int mask = 0; for (int i = 0; i < L; ++i) mask = mask<<1 | 1; mask = mask << (pass*L); for (int i = firstCellID; i < firstCellID + R*partition_size; i+=R) { int masked_number = d_cellID[i] & (mask); masked_number = masked_number >> (L*pass); if(shared_counters[threadIdx.x/R][masked_number] != ARRAY_SIZE) //TODO: Remove this if condition by fixing the math d_sorted_cellID[shared_counters[threadIdx.x/R][masked_number]] += masked_number << L*pass; masked_number = d_objectID[i] & (mask); masked_number = masked_number >> (L*pass); if(shared_counters[threadIdx.x/R][masked_number] != ARRAY_SIZE) //TODO: Remove this if condition by fixing the math d_sorted_objectID[shared_counters[threadIdx.x/R][masked_number]] += masked_number << L*pass; atomicInc((unsigned int*)&shared_counters[threadIdx.x/R][masked_number], INT_MAX); } } void launch_kernel_phase_3(int* d_cellID, int * d_objectID, int size, int pass, int* d_counters, int *d_partial_prefix_sums_per_radix, int * d_sorted_cellID, int* d_sorted_objectID){ //keep launch config of all kernels same dim3 grid(ceil((float)size/BLOCK_DIM_SORT)); dim3 block(BLOCK_DIM_SORT,1); int partition_size = ceil((float)size/MAX_GRID_SIZE); int Num_Elements_Per_Group = R*partition_size; hipLaunchKernelGGL(( phase_3_kernel) , dim3(grid), 
dim3(block), 0, 0, d_cellID, d_objectID, size, partition_size, Num_Elements_Per_Group, pass, d_counters, d_partial_prefix_sums_per_radix,d_sorted_cellID, d_sorted_objectID); } void sort(int *d_cellID, int *d_objectID){ // int * d_cellID, *d_objectID; // checkCudaErrors(hipMalloc(&d_cellID, ARRAY_SIZE*sizeof(int))); // checkCudaErrors(hipMalloc(&d_objectID, ARRAY_SIZE*sizeof(int))); // checkCudaErrors(hipMemcpy(d_cellID, cellID, ARRAY_SIZE*sizeof(int),hipMemcpyHostToDevice)); // checkCudaErrors(hipMemcpy(d_objectID, objectID, ARRAY_SIZE*sizeof(int), hipMemcpyHostToDevice)); int * d_counters; checkCudaErrors(hipMalloc(&d_counters, NUM_RADICES * NUM_BLOCKS * NUMBER_OF_GROUPS_PER_BLOCK * sizeof(int))); int* d_partial_prefix_sums_per_radix; checkCudaErrors(hipMalloc(&d_partial_prefix_sums_per_radix, sizeof(int) * NUM_RADICES)); int *d_sorted_cellID; checkCudaErrors(hipMalloc(&d_sorted_cellID, ARRAY_SIZE*sizeof(int))); checkCudaErrors(hipMemset(d_sorted_cellID, 0, ARRAY_SIZE*sizeof(int))); int *d_sorted_objectID; checkCudaErrors(hipMalloc(&d_sorted_objectID, ARRAY_SIZE*sizeof(int))); checkCudaErrors(hipMemset(d_sorted_objectID, 0, ARRAY_SIZE*sizeof(int))); for(int i = 0; i < 4 ; i++) { //printf("Pass %d\n", i); checkCudaErrors(hipMemset(d_counters, 0, NUM_RADICES * NUM_BLOCKS * NUMBER_OF_GROUPS_PER_BLOCK * sizeof(int) )); launch_kernel_phase_1(d_cellID, d_objectID, ARRAY_SIZE, i, d_counters); // int *h_d_counters; // h_d_counters = (int *) malloc(NUM_RADICES * NUM_BLOCKS * NUMBER_OF_GROUPS_PER_BLOCK * sizeof(int)); // checkCudaErrors(hipMemcpy(h_d_counters, d_counters, NUM_RADICES * NUM_BLOCKS * NUMBER_OF_GROUPS_PER_BLOCK * sizeof(int), hipMemcpyDeviceToHost )); // for (int i = 0; i < NUM_RADICES; ++i) // { // printf("Radix: %d Values: ", i); // for(int j = 0; j<NUM_BLOCKS; j++){ // for(int k = 0; k<NUMBER_OF_GROUPS_PER_BLOCK; k++){ // printf("%d ", h_d_counters[getAddress(i,j,k)]); // } // printf("\t"); // } // printf("\n\n"); // } launch_kernel_phase_2(d_cellID, d_objectID, ARRAY_SIZE, i, d_counters, d_partial_prefix_sums_per_radix); int *h_d_partial_prefix_sums_per_radix; h_d_partial_prefix_sums_per_radix = (int*) malloc(sizeof(int) * NUM_RADICES); checkCudaErrors(hipMemcpy(h_d_partial_prefix_sums_per_radix, d_partial_prefix_sums_per_radix, sizeof(int) * NUM_RADICES, hipMemcpyDeviceToHost)); //for (int i = 0; i < NUM_RADICES; ++i) // printf("Radix %d: %d\n", i,h_d_partial_prefix_sums_per_radix[i]); launch_kernel_phase_3(d_cellID, d_objectID, ARRAY_SIZE, i, d_counters, d_partial_prefix_sums_per_radix, d_sorted_cellID, d_sorted_objectID); } int* h_d_sorted_cellID; h_d_sorted_cellID = (int *)malloc( ARRAY_SIZE* sizeof(int)); checkCudaErrors(hipMemcpy(h_d_sorted_cellID, d_sorted_cellID, ARRAY_SIZE*sizeof(int), hipMemcpyDeviceToHost)); // printf("Sorted Array\n"); // for (int i = 0; i < ARRAY_SIZE; ++i) // { // printf("%d ", h_d_sorted_cellID[i]); // } int* h_d_sorted_objectID; h_d_sorted_objectID = (int *)malloc( ARRAY_SIZE* sizeof(int)); checkCudaErrors(hipMemcpy(h_d_sorted_objectID, d_sorted_objectID, ARRAY_SIZE*sizeof(int), hipMemcpyDeviceToHost)); // printf("\n"); //printf("Sorted Array\n"); //for (int i = 0; i < ARRAY_SIZE; ++i) //{ // printf("(%d, %d), ",h_d_sorted_cellID[i], h_d_sorted_objectID[i]); //} checkCudaErrors(hipFree(d_sorted_cellID)); checkCudaErrors(hipFree(d_sorted_objectID)); checkCudaErrors(hipFree(d_counters)); } // int main(int argc, char const *argv[]) // { // hipSetDevice(1); // // srand(time(NULL)); // // for (int i = 0; i < ARRAY_SIZE; ++i) // // { // // cellID[i] = 
rand(); // // objectID[i] = i; // // } // // for (int i = 0; i < ARRAY_SIZE; ++i) // // { // // cellID[i] = i; // // objectID[i] = ARRAY_SIZE - i; // // } // sort(); // return 0; // }
83bbe56d545f179d70f21df4ee3930b5ea940fbd.cu
#include <stdio.h> #include <ctime> #include <climits> #include <helper_cuda.h> #define R 16 #define L 8 //Don't increase L beyond 8 #define MAX_GRID_SIZE 2147483646 #define BLOCK_DIM_SORT 192 #define NUMBER_OF_GROUPS_PER_BLOCK 12 #define NUM_RADICES (1<<L) #define NUM_BLOCKS 16//ceil((float)size/BLOCK_DIM_SORT) //If the size is 3072 = 192*16 #define ARRAY_SIZE 3072 #define NUM_GROUPS NUMBER_OF_GROUPS_PER_BLOCK * NUM_BLOCKS #define NUM_RADICES_PER_BLOCK 16 // NUM_RADICES/NUM_BLOCK = 256/8 = 32 // int cellID[ARRAY_SIZE]; // int objectID[ARRAY_SIZE]; __device__ __host__ int getAddress(int a, int b, int c){ //d1: radices //d2: thread blocks //d3: thread groups //a: radix, b: thread block, c: thread group // int d1 = NUM_RADICES; int d2 = NUM_BLOCKS; int d3 = NUMBER_OF_GROUPS_PER_BLOCK; return (d2*d3*a + d3*b + c); //http://stackoverflow.com/questions/789913/array-offset-calculations-in-multi-dimensional-array-column-vs-row-major } void __global__ phase_1_kernel(int *d_cellID, int *d_objectID, int size, int partition_size, int Num_Elements_Per_Group, int pass, int * d_counters){ __shared__ int shared_counters[NUMBER_OF_GROUPS_PER_BLOCK][NUM_RADICES]; if(threadIdx.x%R==0){ for (int i = 0; i < NUM_RADICES; ++i){ shared_counters[threadIdx.x/R][i] = 0; } } __syncthreads(); int firstCellID =(blockIdx.x*NUMBER_OF_GROUPS_PER_BLOCK + threadIdx.x/R)*Num_Elements_Per_Group + threadIdx.x%R; unsigned int mask = 0; for (int i = 0; i < L; ++i) mask = mask<<1 | 1; mask = mask << (pass*L); for (int i = firstCellID; i < firstCellID + R*partition_size; i+=R) { unsigned int masked_number = d_cellID[i] & (mask); masked_number = masked_number >> (L*pass); atomicInc((unsigned int*)&shared_counters[threadIdx.x/R][masked_number], INT_MAX); } __syncthreads(); if(threadIdx.x%R == 0){ for (int i = 0; i < NUM_RADICES; ++i) { d_counters[getAddress(i,blockIdx.x,threadIdx.x/R)] = shared_counters[threadIdx.x/R][i]; } } } void launch_kernel_phase_1(int* d_cellID, int * d_objectID, int size, int pass, int* d_counters){ //keep launch config of all kernels same dim3 grid(ceil((float)size/BLOCK_DIM_SORT)); dim3 block(BLOCK_DIM_SORT,1); int partition_size = ceil((float)size/MAX_GRID_SIZE); int Num_Elements_Per_Group = R*partition_size; phase_1_kernel <<<grid, block>>>(d_cellID, d_objectID, size, partition_size, Num_Elements_Per_Group, pass, d_counters); } void __global__ phase_2_kernel(int *d_cellID, int *d_objectID, int size, int partition_size, int Num_Elements_Per_Group, int pass, int * d_counters, int *d_partial_prefix_sums_per_radix){ int lowestRadixForBlock = NUM_RADICES_PER_BLOCK*blockIdx.x ; int highestRadixForBlock = lowestRadixForBlock + NUM_RADICES_PER_BLOCK - 1; //Both radices are included in the radix range for this group __shared__ int shared_counters[NUM_RADICES_PER_BLOCK][NUM_GROUPS]; if(threadIdx.x < NUM_RADICES_PER_BLOCK) { int i = threadIdx.x; for(int j = 0; j < NUM_GROUPS; j++) { shared_counters[i][j] = 0; } } __syncthreads(); if(threadIdx.x <= highestRadixForBlock - lowestRadixForBlock) { int i = threadIdx.x + lowestRadixForBlock; for (int j = 0; j < NUM_GROUPS; ++j) { shared_counters[i - lowestRadixForBlock][j] = d_counters[getAddress(i,j/NUMBER_OF_GROUPS_PER_BLOCK, j%NUMBER_OF_GROUPS_PER_BLOCK)]; } } __syncthreads(); //Prefix sum naive implementation for shared memory if(threadIdx.x <= highestRadixForBlock - lowestRadixForBlock) { int i = threadIdx.x + lowestRadixForBlock; for(int j = 1;j<NUM_GROUPS; j++){ shared_counters[i - lowestRadixForBlock][j] = shared_counters[i - lowestRadixForBlock][j] + 
shared_counters[i - lowestRadixForBlock][j-1]; } } __syncthreads(); if(threadIdx.x <= highestRadixForBlock - lowestRadixForBlock) { int i = threadIdx.x + lowestRadixForBlock; for (int j = 0; j < NUM_GROUPS; ++j) { d_counters[getAddress(i,j/NUMBER_OF_GROUPS_PER_BLOCK, j%NUMBER_OF_GROUPS_PER_BLOCK)] = shared_counters[i - lowestRadixForBlock][j]; } d_partial_prefix_sums_per_radix[i] = shared_counters[i - lowestRadixForBlock][NUM_GROUPS-1]; } } void launch_kernel_phase_2(int* d_cellID, int * d_objectID, int size, int pass, int* d_counters, int* d_partial_prefix_sums_per_radix){ //keep launch config of all kernels same dim3 grid(NUM_RADICES/NUM_RADICES_PER_BLOCK); dim3 block(BLOCK_DIM_SORT,1); int partition_size = ceil((float)size/MAX_GRID_SIZE); int Num_Elements_Per_Group = R*partition_size; phase_2_kernel <<<grid, block>>>(d_cellID, d_objectID, size, partition_size, Num_Elements_Per_Group, pass, d_counters, d_partial_prefix_sums_per_radix); } void __global__ phase_3_kernel(int *d_cellID, int *d_objectID, int size, int partition_size, int Num_Elements_Per_Group, int pass, int * d_counters, int* d_partial_prefix_sums_per_radix, int * d_sorted_cellID, int* d_sorted_objectID){ __shared__ int shared_parallel_prefix[NUM_RADICES]; __shared__ int shared_counters[NUMBER_OF_GROUPS_PER_BLOCK][NUM_RADICES]; if(threadIdx.x%R==0){ for (int i = 0; i < NUM_RADICES; ++i){ shared_counters[threadIdx.x/R][i] = 0; } } __syncthreads(); if(threadIdx.x == 0) { for (int i = 0; i < NUM_RADICES; ++i) { shared_parallel_prefix[i] = d_partial_prefix_sums_per_radix[i]; } } __syncthreads(); if(threadIdx.x == 0){ for (int i = 1; i < NUM_RADICES; ++i) { shared_parallel_prefix[i] = shared_parallel_prefix[i-1] + shared_parallel_prefix[i]; } } __syncthreads(); if(threadIdx.x%R == 0){ for (int i = 0; i < NUM_RADICES; ++i) { shared_counters[threadIdx.x/R][i] = d_counters[getAddress(i,blockIdx.x,threadIdx.x/R)]; if(i>0) shared_counters[threadIdx.x/R][i] += shared_parallel_prefix[i-1]; } } __syncthreads(); int firstCellID =(blockIdx.x*NUMBER_OF_GROUPS_PER_BLOCK + threadIdx.x/R)*Num_Elements_Per_Group + threadIdx.x%R; unsigned int mask = 0; for (int i = 0; i < L; ++i) mask = mask<<1 | 1; mask = mask << (pass*L); for (int i = firstCellID; i < firstCellID + R*partition_size; i+=R) { int masked_number = d_cellID[i] & (mask); masked_number = masked_number >> (L*pass); if(shared_counters[threadIdx.x/R][masked_number] != ARRAY_SIZE) //TODO: Remove this if condition by fixing the math d_sorted_cellID[shared_counters[threadIdx.x/R][masked_number]] += masked_number << L*pass; masked_number = d_objectID[i] & (mask); masked_number = masked_number >> (L*pass); if(shared_counters[threadIdx.x/R][masked_number] != ARRAY_SIZE) //TODO: Remove this if condition by fixing the math d_sorted_objectID[shared_counters[threadIdx.x/R][masked_number]] += masked_number << L*pass; atomicInc((unsigned int*)&shared_counters[threadIdx.x/R][masked_number], INT_MAX); } } void launch_kernel_phase_3(int* d_cellID, int * d_objectID, int size, int pass, int* d_counters, int *d_partial_prefix_sums_per_radix, int * d_sorted_cellID, int* d_sorted_objectID){ //keep launch config of all kernels same dim3 grid(ceil((float)size/BLOCK_DIM_SORT)); dim3 block(BLOCK_DIM_SORT,1); int partition_size = ceil((float)size/MAX_GRID_SIZE); int Num_Elements_Per_Group = R*partition_size; phase_3_kernel <<<grid, block>>>(d_cellID, d_objectID, size, partition_size, Num_Elements_Per_Group, pass, d_counters, d_partial_prefix_sums_per_radix,d_sorted_cellID, d_sorted_objectID); } void sort(int 
*d_cellID, int *d_objectID){ // int * d_cellID, *d_objectID; // checkCudaErrors(cudaMalloc(&d_cellID, ARRAY_SIZE*sizeof(int))); // checkCudaErrors(cudaMalloc(&d_objectID, ARRAY_SIZE*sizeof(int))); // checkCudaErrors(cudaMemcpy(d_cellID, cellID, ARRAY_SIZE*sizeof(int),cudaMemcpyHostToDevice)); // checkCudaErrors(cudaMemcpy(d_objectID, objectID, ARRAY_SIZE*sizeof(int), cudaMemcpyHostToDevice)); int * d_counters; checkCudaErrors(cudaMalloc(&d_counters, NUM_RADICES * NUM_BLOCKS * NUMBER_OF_GROUPS_PER_BLOCK * sizeof(int))); int* d_partial_prefix_sums_per_radix; checkCudaErrors(cudaMalloc(&d_partial_prefix_sums_per_radix, sizeof(int) * NUM_RADICES)); int *d_sorted_cellID; checkCudaErrors(cudaMalloc(&d_sorted_cellID, ARRAY_SIZE*sizeof(int))); checkCudaErrors(cudaMemset(d_sorted_cellID, 0, ARRAY_SIZE*sizeof(int))); int *d_sorted_objectID; checkCudaErrors(cudaMalloc(&d_sorted_objectID, ARRAY_SIZE*sizeof(int))); checkCudaErrors(cudaMemset(d_sorted_objectID, 0, ARRAY_SIZE*sizeof(int))); for(int i = 0; i < 4 ; i++) { //printf("Pass %d\n", i); checkCudaErrors(cudaMemset(d_counters, 0, NUM_RADICES * NUM_BLOCKS * NUMBER_OF_GROUPS_PER_BLOCK * sizeof(int) )); launch_kernel_phase_1(d_cellID, d_objectID, ARRAY_SIZE, i, d_counters); // int *h_d_counters; // h_d_counters = (int *) malloc(NUM_RADICES * NUM_BLOCKS * NUMBER_OF_GROUPS_PER_BLOCK * sizeof(int)); // checkCudaErrors(cudaMemcpy(h_d_counters, d_counters, NUM_RADICES * NUM_BLOCKS * NUMBER_OF_GROUPS_PER_BLOCK * sizeof(int), cudaMemcpyDeviceToHost )); // for (int i = 0; i < NUM_RADICES; ++i) // { // printf("Radix: %d Values: ", i); // for(int j = 0; j<NUM_BLOCKS; j++){ // for(int k = 0; k<NUMBER_OF_GROUPS_PER_BLOCK; k++){ // printf("%d ", h_d_counters[getAddress(i,j,k)]); // } // printf("\t"); // } // printf("\n\n"); // } launch_kernel_phase_2(d_cellID, d_objectID, ARRAY_SIZE, i, d_counters, d_partial_prefix_sums_per_radix); int *h_d_partial_prefix_sums_per_radix; h_d_partial_prefix_sums_per_radix = (int*) malloc(sizeof(int) * NUM_RADICES); checkCudaErrors(cudaMemcpy(h_d_partial_prefix_sums_per_radix, d_partial_prefix_sums_per_radix, sizeof(int) * NUM_RADICES, cudaMemcpyDeviceToHost)); //for (int i = 0; i < NUM_RADICES; ++i) // printf("Radix %d: %d\n", i,h_d_partial_prefix_sums_per_radix[i]); launch_kernel_phase_3(d_cellID, d_objectID, ARRAY_SIZE, i, d_counters, d_partial_prefix_sums_per_radix, d_sorted_cellID, d_sorted_objectID); } int* h_d_sorted_cellID; h_d_sorted_cellID = (int *)malloc( ARRAY_SIZE* sizeof(int)); checkCudaErrors(cudaMemcpy(h_d_sorted_cellID, d_sorted_cellID, ARRAY_SIZE*sizeof(int), cudaMemcpyDeviceToHost)); // printf("Sorted Array\n"); // for (int i = 0; i < ARRAY_SIZE; ++i) // { // printf("%d ", h_d_sorted_cellID[i]); // } int* h_d_sorted_objectID; h_d_sorted_objectID = (int *)malloc( ARRAY_SIZE* sizeof(int)); checkCudaErrors(cudaMemcpy(h_d_sorted_objectID, d_sorted_objectID, ARRAY_SIZE*sizeof(int), cudaMemcpyDeviceToHost)); // printf("\n"); //printf("Sorted Array\n"); //for (int i = 0; i < ARRAY_SIZE; ++i) //{ // printf("(%d, %d), ",h_d_sorted_cellID[i], h_d_sorted_objectID[i]); //} checkCudaErrors(cudaFree(d_sorted_cellID)); checkCudaErrors(cudaFree(d_sorted_objectID)); checkCudaErrors(cudaFree(d_counters)); } // int main(int argc, char const *argv[]) // { // cudaSetDevice(1); // // srand(time(NULL)); // // for (int i = 0; i < ARRAY_SIZE; ++i) // // { // // cellID[i] = rand(); // // objectID[i] = i; // // } // // for (int i = 0; i < ARRAY_SIZE; ++i) // // { // // cellID[i] = i; // // objectID[i] = ARRAY_SIZE - i; // // } // sort(); 
// return 0; // }
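// --- Illustrative sketch (an assumption, mirroring the commented-out main above) ---
// sort() expects device buffers holding ARRAY_SIZE keys (cell IDs) and payloads
// (object IDs); a minimal host driver could look like this:
#include <cstdlib>
#include <helper_cuda.h>

void sort(int *d_cellID, int *d_objectID);   // defined above

int main() {
  const int n = 3072;                        // must match ARRAY_SIZE
  int *h_cellID   = new int[n];
  int *h_objectID = new int[n];
  for (int i = 0; i < n; ++i) {
    h_cellID[i]   = rand();                  // keys to sort
    h_objectID[i] = i;                       // payload carried along with each key
  }

  int *d_cellID, *d_objectID;
  checkCudaErrors(cudaMalloc(&d_cellID,   n * sizeof(int)));
  checkCudaErrors(cudaMalloc(&d_objectID, n * sizeof(int)));
  checkCudaErrors(cudaMemcpy(d_cellID,   h_cellID,   n * sizeof(int), cudaMemcpyHostToDevice));
  checkCudaErrors(cudaMemcpy(d_objectID, h_objectID, n * sizeof(int), cudaMemcpyHostToDevice));

  sort(d_cellID, d_objectID);

  checkCudaErrors(cudaFree(d_cellID));
  checkCudaErrors(cudaFree(d_objectID));
  delete[] h_cellID;
  delete[] h_objectID;
  return 0;
}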
d91e2903bf5542fc10b52103e4ceaa22f1b6a083.hip
// !!! This is a file automatically generated by hipify!!! #include <THH/THHApply.cuh> #include <TH/THHalf.h> #include <THH/THHNumerics.cuh> #include <THH/THHTensorCopy.hpp> #include <type_traits> #include <c10/util/BFloat16.h> // Copy operator for the pointwise apply kernel template <typename T> struct CopyOp { __device__ __forceinline__ void operator()(T* dst, T* src) { #if __CUDA_ARCH__ >= 350 *dst = c10::static_cast_with_inter_type<T, T>::apply(*src); #else *dst = c10::static_cast_with_inter_type<T, T>::apply(*src); #endif } }; template <> struct CopyOp <bool> { __device__ __forceinline__ void operator()(bool* dst, bool* src) { *dst = ScalarConvert<bool, bool>::to(*src); } }; template <> struct CopyOp <at::BFloat16> { __device__ __forceinline__ void operator()(at::BFloat16* dst, at::BFloat16* src) { *dst = ScalarConvert<at::BFloat16, at::BFloat16>::to(*src); } }; #include <THH/generic/THHTensorCopy.hip> #include <THH/THHGenerateAllTypes.h> #include <THH/generic/THHTensorCopy.hip> #include <THH/THHGenerateComplexTypes.h> #include <THH/generic/THHTensorCopy.hip> #include <THH/THHGenerateBoolType.h> #include <THH/generic/THHTensorCopy.hip> #include <THH/THHGenerateBFloat16Type.h>
d91e2903bf5542fc10b52103e4ceaa22f1b6a083.cu
#include <THC/THCApply.cuh> #include <TH/THHalf.h> #include <THC/THCNumerics.cuh> #include <THC/THCTensorCopy.hpp> #include <type_traits> #include <c10/util/BFloat16.h> // Copy operator for the pointwise apply kernel template <typename T> struct CopyOp { __device__ __forceinline__ void operator()(T* dst, T* src) { #if __CUDA_ARCH__ >= 350 *dst = c10::static_cast_with_inter_type<T, T>::apply(*src); #else *dst = c10::static_cast_with_inter_type<T, T>::apply(*src); #endif } }; template <> struct CopyOp <bool> { __device__ __forceinline__ void operator()(bool* dst, bool* src) { *dst = ScalarConvert<bool, bool>::to(*src); } }; template <> struct CopyOp <at::BFloat16> { __device__ __forceinline__ void operator()(at::BFloat16* dst, at::BFloat16* src) { *dst = ScalarConvert<at::BFloat16, at::BFloat16>::to(*src); } }; #include <THC/generic/THCTensorCopy.cu> #include <THC/THCGenerateAllTypes.h> #include <THC/generic/THCTensorCopy.cu> #include <THC/THCGenerateComplexTypes.h> #include <THC/generic/THCTensorCopy.cu> #include <THC/THCGenerateBoolType.h> #include <THC/generic/THCTensorCopy.cu> #include <THC/THCGenerateBFloat16Type.h>
21a103910a54e3183ad89babd429d2649d0b8c65.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Winter Semester 2015/2016, March 15 - April 15 // ### // ### #include "helper.h" #include <iostream> using namespace std; __global__ void block_sum(float *input, float *results, size_t n) { extern __shared__ float sdata[]; int i = threadIdx.x + blockDim.x * blockIdx.x; int tx = threadIdx.x; // load input into __shared__ memory if (i < n) { sdata[tx] = input[i]; __syncthreads(); } else { sdata[tx] = 0; } if (i < n) { // block-wide reduction in __shared__ mem for(int offset = blockDim.x / 2; offset > 0; offset /= 2) { if(tx < offset) { // add a partial sum upstream to our own sdata[tx] += sdata[tx + offset]; } __syncthreads(); } // finally, thread 0 writes the result if(threadIdx.x == 0) { // note that the result is per-block // not per-thread results[blockIdx.x] = sdata[0]; } } } int main(int argc, char **argv) { int n = 10000; // alloc and init input array on host (CPU) float *a = new float[n]; for (int i = 0; i < n; i++) { a[i] = 1; } // parameters int blocklength = 1024; int nblocks = (n + blocklength -1)/blocklength; size_t nbytes = n*sizeof(float); // other variables float *aux, *results; results = new float[nblocks]; // alloc device arrays Timer timer; timer.start(); float *d_a = NULL; float *d_results = NULL; hipMalloc(&d_a, nbytes); hipMalloc(&d_results, nblocks*sizeof(float)); hipMemcpy( d_a, a, nbytes, hipMemcpyHostToDevice ); dim3 block = dim3(blocklength,1,1); dim3 grid = dim3(nblocks, 1, 1 ); // only one reduction //hipLaunchKernelGGL(( block_sum) , dim3(grid),dim3(block),blocklength*sizeof(float), 0, d_a, d_results, n); // reductions until size 1 while (true) { hipLaunchKernelGGL(( block_sum) , dim3(grid),dim3(block),blocklength*sizeof(float), 0, d_a, d_results, n); if (nblocks == 1) break; hipMemcpy( d_a, d_results, nblocks*sizeof(float), hipMemcpyDeviceToDevice ); n = nblocks; nblocks = (n + blocklength -1)/blocklength; grid = dim3(nblocks, 1, 1 ); } // show results // float total = 0; hipMemcpy( results, d_results, sizeof(float), hipMemcpyDeviceToHost ); timer.end(); float t = timer.get(); // elapsed time in seconds cout << "My time: " << t*1000 << " ms" << endl; // for (int i = 0; i < n; i++) { // total += results[i]; // cout << results[i] << endl; // } // cout << "total :" << total << endl; cout << "total :" << results[0] << endl; }
21a103910a54e3183ad89babd429d2649d0b8c65.cu
// ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Winter Semester 2015/2016, March 15 - April 15 // ### // ### #include "helper.h" #include <iostream> using namespace std; __global__ void block_sum(float *input, float *results, size_t n) { extern __shared__ float sdata[]; int i = threadIdx.x + blockDim.x * blockIdx.x; int tx = threadIdx.x; // load input into __shared__ memory if (i < n) { sdata[tx] = input[i]; __syncthreads(); } else { sdata[tx] = 0; } if (i < n) { // block-wide reduction in __shared__ mem for(int offset = blockDim.x / 2; offset > 0; offset /= 2) { if(tx < offset) { // add a partial sum upstream to our own sdata[tx] += sdata[tx + offset]; } __syncthreads(); } // finally, thread 0 writes the result if(threadIdx.x == 0) { // note that the result is per-block // not per-thread results[blockIdx.x] = sdata[0]; } } } int main(int argc, char **argv) { int n = 10000; // alloc and init input array on host (CPU) float *a = new float[n]; for (int i = 0; i < n; i++) { a[i] = 1; } // parameters int blocklength = 1024; int nblocks = (n + blocklength -1)/blocklength; size_t nbytes = n*sizeof(float); // other variables float *aux, *results; results = new float[nblocks]; // alloc device arrays Timer timer; timer.start(); float *d_a = NULL; float *d_results = NULL; cudaMalloc(&d_a, nbytes); cudaMalloc(&d_results, nblocks*sizeof(float)); cudaMemcpy( d_a, a, nbytes, cudaMemcpyHostToDevice ); dim3 block = dim3(blocklength,1,1); dim3 grid = dim3(nblocks, 1, 1 ); // only one reduction // block_sum <<<grid,block,blocklength*sizeof(float)>>> (d_a, d_results, n); // reductions until size 1 while (true) { block_sum <<<grid,block,blocklength*sizeof(float)>>> (d_a, d_results, n); if (nblocks == 1) break; cudaMemcpy( d_a, d_results, nblocks*sizeof(float), cudaMemcpyDeviceToDevice ); n = nblocks; nblocks = (n + blocklength -1)/blocklength; grid = dim3(nblocks, 1, 1 ); } // show results // float total = 0; cudaMemcpy( results, d_results, sizeof(float), cudaMemcpyDeviceToHost ); timer.end(); float t = timer.get(); // elapsed time in seconds cout << "My time: " << t*1000 << " ms" << endl; // for (int i = 0; i < n; i++) { // total += results[i]; // cout << results[i] << endl; // } // cout << "total :" << total << endl; cout << "total :" << results[0] << endl; }
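// --- Illustrative sketch (not a drop-in replacement for the file above) ---
// A variant of the same block-wide reduction that keeps every __syncthreads()
// outside divergent branches, so all threads of a block reach each barrier:
__global__ void block_sum_safe(const float *input, float *results, size_t n) {
  extern __shared__ float sdata[];
  size_t i  = threadIdx.x + (size_t)blockDim.x * blockIdx.x;
  int    tx = threadIdx.x;

  // every thread writes something, so the whole block reaches the barrier below
  sdata[tx] = (i < n) ? input[i] : 0.0f;
  __syncthreads();

  for (int offset = blockDim.x / 2; offset > 0; offset /= 2) {
    if (tx < offset)
      sdata[tx] += sdata[tx + offset];
    __syncthreads();                 // hit by all threads on every iteration
  }

  if (tx == 0)
    results[blockIdx.x] = sdata[0];  // one partial sum per block
}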
0e3f3821a66eef931849943764d56ab79e74e07e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "VectorMath.hh"
#include "helper.hh"
#include <vector>
#include <iostream>

using namespace std;

#define THREADS_PER_BLOCK 1024

__global__ void MVP_kernel(double * A, double * x, double * b, int N) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= N) return;  // guard: the grid is rounded up, so some threads fall past the last row
  double sum = 0;
  for (int i=0; i<N; i++) {
    int ind = tid*N + i;
    sum += A[ind] * x[i];
  }
  b[tid] = sum;
}

vector<double> MVP_GPU(vector<vector<double>> &A, vector<double> &x) {
  int N = A.size();
  size_t dSize = sizeof(double);

  // device pointers
  double *d_A, *d_x, *d_b;
  double *b;
  b = new double[N];

  hipMalloc(&d_A, N*N*dSize);
  hipMalloc(&d_x, N*dSize);
  hipMalloc(&d_b, N*dSize);

  double * array = new double[N*N];
  // convert from vector to array
  for (int i=0; i<N; i++) {
    for (int j=0; j<N; j++) {
      int ind = N*i + j;
      array[ind] = A[i][j];
    }
  }

  hipMemcpy(d_A, array, N*N*dSize, hipMemcpyHostToDevice);
  hipMemcpy(d_x, &x[0], N*dSize, hipMemcpyHostToDevice);

  // round the grid size up so every row is covered when N is not a multiple of THREADS_PER_BLOCK
  int nblocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
  hipLaunchKernelGGL(( MVP_kernel), dim3(nblocks), dim3(THREADS_PER_BLOCK), 0, 0, d_A, d_x, d_b, N);

  hipMemcpy(b, d_b, N*dSize, hipMemcpyDeviceToHost);

  // convert to vector
  vector<double> b_vec(N);
  for (int i=0; i<N; i++) {
    b_vec[i] = b[i];
  }

  hipFree(d_A);
  hipFree(d_x);
  hipFree(d_b);
  delete[] b;      // arrays allocated with new[] must be released with delete[]
  delete[] array;
  return b_vec;
}
0e3f3821a66eef931849943764d56ab79e74e07e.cu
#include "VectorMath.hh" #include "helper.hh" #include <vector> #include <iostream> using namespace std; #define THREADS_PER_BLOCK 1024 __global__ void MVP_kernel(double * A, double * x, double * b, int N) { int tid = blockIdx.x * blockDim.x + threadIdx.x; double sum = 0; for (int i=0; i<N; i++) { int ind = tid*N + i; sum += A[ind] * x[i]; } b[tid] = sum; } vector<double> MVP_GPU(vector<vector<double>> &A, vector<double> &x) { int N = A.size(); size_t dSize = sizeof(double); // device pointers double *d_A, *d_x, *d_b; double *b; b = new double[N]; cudaMalloc(&d_A, N*N*dSize); cudaMalloc(&d_x, N*dSize); cudaMalloc(&d_b, N*dSize); double * array = new double[N*N]; // convert from vector to array for (int i=0; i<N; i++) { for (int j=0; j<N; j++) { int ind = N*i + j; array[ind] = A[i][j]; } } cudaMemcpy(d_A, array, N*N*dSize, cudaMemcpyHostToDevice); cudaMemcpy(d_x, &x[0], N*dSize, cudaMemcpyHostToDevice); MVP_kernel<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_A, d_x, d_b, N); cudaMemcpy(b, d_b, N*dSize, cudaMemcpyDeviceToHost); // convert to vector vector<double> b_vec(N); for (int i=0; i<N; i++) { b_vec[i] = b[i]; } cudaFree(d_A); cudaFree(d_x); cudaFree(d_b); delete b; delete array; return b_vec; }
466d8fc560f003967caac7bd87da85873cd0043b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include <cstdlib> #include <cstring> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {//single CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {//double CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<float>), 
dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const 
double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); __global__ void popc_kernel(const int n, const float* a, const float* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popc(static_cast<uint32_t>(a[index]) ^ static_cast<uint32_t>(b[index])); } } __global__ void popcll_kernel(const int n, const double* a, const double* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popcll(static_cast<uint64_t>(a[index]) ^ static_cast<uint64_t>(b[index])); } } template <> uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x, const float* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). 
NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( popc_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), (uint32_t) 0, thrust::plus<uint32_t>()); } template <> uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x, const double* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( popcll_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), /* NOLINT_NEXT_LINE(build/include_what_you_use) */ (uint32_t) 0, thrust::plus<uint32_t>()); } void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } //calculate gamma in kernel function //K: dimension of data //S: size of all data //N: num of output //W: weights, N*K //X: X (input of this layer) template <> float cal_gamma_gpu<float>(const int K, const int S, const int N, const float* W, const float* X, float* tempX1, float* tempX2){ srand((unsigned int)time(0)); float gamma = 0; float temp = 0; //random sample S pair to calculate gamma for(int i = 0;i < S;++i){ caffe_gpu_memset(sizeof(float) * K, 0, tempX1); caffe_gpu_memset(sizeof(float) * K, 0, tempX2); int s1 = rand() % S; int s2 = rand() % S; s2 = (s1 != s2) ? 
s2 : (s2 + 1) % S; const float* x1 = X + s1 * K; const float* x2 = X + s2 * K; caffe_gpu_gemv<float>(CblasNoTrans, N, K, 1.0, W, x1, 0.0, tempX1); caffe_gpu_gemv<float>(CblasNoTrans, N, K, 1.0, W, x2, 0.0, tempX2); //tempX2 = tempX1 - tempX2 caffe_gpu_sub<float>(K, tempX1, tempX2, tempX2); caffe_gpu_dot<float>(K, tempX2, tempX2, &temp); gamma += temp; } return S / gamma; } //output: // tempX1: W*x1-W*x2 // tempX2: x1-x2 // KK: co * (x1-x2)^T * W^T should be 1*N template<> void cal_add_item_gpu<float>(const float co, const int N, const int K, const float* W, const float* x1, float* tempX1, const float* x2, float* tempX2, const float gamma, float* KK){ caffe_gpu_memset(sizeof(float) * K, 0, tempX1); caffe_gpu_memset(sizeof(float) * K, 0, tempX2); caffe_gpu_gemv<float>(CblasNoTrans, N, K, 1.0, W, x1, 0.0, tempX1); caffe_gpu_gemv<float>(CblasNoTrans, N, K, 1.0, W, x2, 0.0, tempX2); float square_sum = 0; caffe_gpu_sub<float>(K, tempX1, tempX2, tempX1); caffe_gpu_sub<float>(K, x1, x2, tempX2); caffe_gpu_dot<float>(K, tempX1, tempX1, &square_sum); //calculate 2 * \gamma * kernel float kernel = 0.0f; float tempGamma = gamma / 4.0f; for(int i = 0;i < 5;++i){ float temp = (0.0 - tempGamma) * square_sum; temp = exp(temp); kernel += 2 * tempGamma* temp; tempGamma = tempGamma * 2; } //calculate KK <- co * kernel * X^T * W + 1 * KK caffe_gpu_gemm<float>(CblasNoTrans, CblasTrans, 1, N, K, co*kernel, tempX2, W, 0.0, KK); } template<> void cal_add_item_gpu<double>(const double co, const int N, const int K, const double* W, const double* x1, double* tempX1, const double* x2, double* tempX2, const double gamma, double* KK){ //TODO: complete double version of this function } // Gradient with respect to weight for MMD //N: number of output neuron //K: dimension of the feature //M: size of all data //S: size of source data in a batch //W: weight of this layer //X: input of this layer //gamma: gamma / learning rate //delta_W: gredient of weight template<> void caffe_gpu_mmd<float>(const int N, const int K, const int M, const int S, const int labeledTargetSize, const float* W, const float* X, const float gamma, float* delta_W){ srand((unsigned int)time(0)); //output the value of delta_W before MMD gradient float* temp; CUDA_CHECK(hipMalloc(&temp, N*K * sizeof(float))); caffe_gpu_sign(N*K, delta_W, temp); float sum; caffe_gpu_dot(N*K, delta_W, temp, &sum); LOG(INFO) << "delta_W before MMD, sum = " << sum << ", average = " << sum / (N*K); float* KK; float* tempX1; float* tempX2; CUDA_CHECK(hipMalloc(&KK, N * sizeof(float))); CUDA_CHECK(hipMalloc(&tempX1, K * sizeof(float))); CUDA_CHECK(hipMalloc(&tempX2, K * sizeof(float))); float kernel_gamma = cal_gamma_gpu(K, M, N, W, X, tempX1, tempX2); int SS = (S>(M-S)) ? 
S : M-S; for(int i = 0;i < SS;++i){ //random int s1 = rand() % S; int s2 = rand() % S; if(s1 == s2){ s2 = (s2 + 100) % S; } // b-test O(n2) //for(int j = 0; j < SS; ++j){ /*LOG(INFO) << "111111"; */ /*LOG(INFO) << "NNNNN " << N;*/ /*LOG(INFO) << "KKKKK " << K;*/ /*LOG(INFO) << "MMMMM " << M;*/ /*LOG(INFO) << "SSSSS " << S;*/ int t1 = rand() % (M - S); int t2 = rand() % (M - S); if(t1 == t2){ t2 = (t2 + 100) % (M - S); } t1 = t1 + S; t2 = t2 + S; /*LOG(INFO) << "NNNNN " << N;*/ /*LOG(INFO) << "KKKKK " << K;*/ /*LOG(INFO) << "MMMMM " << M;*/ /*LOG(INFO) << "SSSSS " << S;*/ const float *x_s1 = X + s1 * K; const float *x_s2 = X + s2 * K; const float *x_t1 = X + t1 * K; const float *x_t2 = X + t2 * K; const float tempS = 1.0; //LOG(INFO) << "22222222"; //calculate four items of MMD gradient caffe_gpu_memset(sizeof(float) * N, 0, KK); cal_add_item_gpu<float>(-1, N, K, W, x_s1, tempX1, x_s2, tempX2, kernel_gamma, KK); caffe_gpu_gemm<float>(CblasNoTrans, CblasTrans, N, K, 1, tempS * gamma, KK,tempX2, 1.0, delta_W); caffe_gpu_memset(sizeof(float) * N, 0, KK); cal_add_item_gpu<float>(1, N, K, W, x_s1, tempX1, x_t2, tempX2, kernel_gamma, KK); caffe_gpu_gemm<float>(CblasNoTrans, CblasTrans, N, K, 1, tempS * gamma, KK,tempX2, 1.0, delta_W); caffe_gpu_memset(sizeof(float) * N, 0, KK); cal_add_item_gpu<float>(1, N, K, W, x_s2, tempX1, x_t1, tempX2, kernel_gamma, KK); caffe_gpu_gemm<float>(CblasNoTrans, CblasTrans, N, K, 1, tempS * gamma, KK,tempX2, 1.0, delta_W); caffe_gpu_memset(sizeof(float) * N, 0, KK); cal_add_item_gpu<float>(-1, N, K, W, x_t1, tempX1, x_t2, tempX2, kernel_gamma, KK); caffe_gpu_gemm<float>(CblasNoTrans, CblasTrans, N, K, 1, tempS * gamma, KK,tempX2, 1.0, delta_W); } //output the value of delta_W after MMD gradient caffe_gpu_sign(N*K, delta_W, temp); caffe_gpu_dot(N*K, delta_W, temp, &sum); LOG(INFO) << "delta_W after MMD, sum = " << sum << ", average = " << sum / (N*K); CUDA_CHECK(hipFree(temp)); CUDA_CHECK(hipFree(KK)); CUDA_CHECK(hipFree(tempX1)); CUDA_CHECK(hipFree(tempX2)); } template<> void caffe_gpu_mmd<double>(const int N, const int K, const int M, const int S, const int labeledTargetSize, const double* W, const double* X, const double gamma, double* delta_W){ //TODO: complete the double version of this function } } // namespace caffe
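// A minimal standalone sketch (not from the original sources): every
// element-wise kernel in the file above is launched through Caffe's
// CAFFE_GET_BLOCKS / CUDA_KERNEL_LOOP helpers.  Assuming those macros
// implement the usual grid-stride pattern, the snippet below shows the same
// launch without the macros.  The names scale_kernel and kThreadsPerBlock are
// illustrative assumptions, not Caffe API.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale_kernel(const int n, const float alpha, float* y) {
  // Grid-stride loop: each thread covers i, i + stride, i + 2*stride, ...
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    y[i] *= alpha;
  }
}

int main() {
  const int n = 1 << 20;
  const int kThreadsPerBlock = 512;                                // cf. CAFFE_CUDA_NUM_THREADS
  const int blocks = (n + kThreadsPerBlock - 1) / kThreadsPerBlock; // cf. CAFFE_GET_BLOCKS(n)
  float* d_y = nullptr;
  cudaMalloc(&d_y, n * sizeof(float));
  cudaMemset(d_y, 0, n * sizeof(float));
  scale_kernel<<<blocks, kThreadsPerBlock>>>(n, 2.0f, d_y);
  cudaDeviceSynchronize();
  printf("scale_kernel: %s\n", cudaGetErrorString(cudaGetLastError()));
  cudaFree(d_y);
  return 0;
}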
466d8fc560f003967caac7bd87da85873cd0043b.cu
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include <cstdlib> #include <cstring> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {//single CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {//double CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <> 
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) 
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); __global__ void popc_kernel(const int n, const float* a, const float* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popc(static_cast<uint32_t>(a[index]) ^ static_cast<uint32_t>(b[index])); } } __global__ void popcll_kernel(const int n, const double* a, const double* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popcll(static_cast<uint64_t>(a[index]) ^ static_cast<uint64_t>(b[index])); } } template <> uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x, const float* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) popc_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), (uint32_t) 0, thrust::plus<uint32_t>()); } template <> uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x, const double* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). 
NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) popcll_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), /* NOLINT_NEXT_LINE(build/include_what_you_use) */ (uint32_t) 0, thrust::plus<uint32_t>()); } void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } //calculate gamma in kernel function //K: dimension of data //S: size of all data //N: num of output //W: weights, N*K //X: X (input of this layer) template <> float cal_gamma_gpu<float>(const int K, const int S, const int N, const float* W, const float* X, float* tempX1, float* tempX2){ srand((unsigned int)time(0)); float gamma = 0; float temp = 0; //random sample S pair to calculate gamma for(int i = 0;i < S;++i){ caffe_gpu_memset(sizeof(float) * K, 0, tempX1); caffe_gpu_memset(sizeof(float) * K, 0, tempX2); int s1 = rand() % S; int s2 = rand() % S; s2 = (s1 != s2) ? 
s2 : (s2 + 1) % S; const float* x1 = X + s1 * K; const float* x2 = X + s2 * K; caffe_gpu_gemv<float>(CblasNoTrans, N, K, 1.0, W, x1, 0.0, tempX1); caffe_gpu_gemv<float>(CblasNoTrans, N, K, 1.0, W, x2, 0.0, tempX2); //tempX2 = tempX1 - tempX2 caffe_gpu_sub<float>(K, tempX1, tempX2, tempX2); caffe_gpu_dot<float>(K, tempX2, tempX2, &temp); gamma += temp; } return S / gamma; } //output: // tempX1: W*x1-W*x2 // tempX2: x1-x2 // KK: co * (x1-x2)^T * W^T should be 1*N template<> void cal_add_item_gpu<float>(const float co, const int N, const int K, const float* W, const float* x1, float* tempX1, const float* x2, float* tempX2, const float gamma, float* KK){ caffe_gpu_memset(sizeof(float) * K, 0, tempX1); caffe_gpu_memset(sizeof(float) * K, 0, tempX2); caffe_gpu_gemv<float>(CblasNoTrans, N, K, 1.0, W, x1, 0.0, tempX1); caffe_gpu_gemv<float>(CblasNoTrans, N, K, 1.0, W, x2, 0.0, tempX2); float square_sum = 0; caffe_gpu_sub<float>(K, tempX1, tempX2, tempX1); caffe_gpu_sub<float>(K, x1, x2, tempX2); caffe_gpu_dot<float>(K, tempX1, tempX1, &square_sum); //calculate 2 * \gamma * kernel float kernel = 0.0f; float tempGamma = gamma / 4.0f; for(int i = 0;i < 5;++i){ float temp = (0.0 - tempGamma) * square_sum; temp = exp(temp); kernel += 2 * tempGamma* temp; tempGamma = tempGamma * 2; } //calculate KK <- co * kernel * X^T * W + 1 * KK caffe_gpu_gemm<float>(CblasNoTrans, CblasTrans, 1, N, K, co*kernel, tempX2, W, 0.0, KK); } template<> void cal_add_item_gpu<double>(const double co, const int N, const int K, const double* W, const double* x1, double* tempX1, const double* x2, double* tempX2, const double gamma, double* KK){ //TODO: complete double version of this function } // Gradient with respect to weight for MMD //N: number of output neuron //K: dimension of the feature //M: size of all data //S: size of source data in a batch //W: weight of this layer //X: input of this layer //gamma: gamma / learning rate //delta_W: gredient of weight template<> void caffe_gpu_mmd<float>(const int N, const int K, const int M, const int S, const int labeledTargetSize, const float* W, const float* X, const float gamma, float* delta_W){ srand((unsigned int)time(0)); //output the value of delta_W before MMD gradient float* temp; CUDA_CHECK(cudaMalloc(&temp, N*K * sizeof(float))); caffe_gpu_sign(N*K, delta_W, temp); float sum; caffe_gpu_dot(N*K, delta_W, temp, &sum); LOG(INFO) << "delta_W before MMD, sum = " << sum << ", average = " << sum / (N*K); float* KK; float* tempX1; float* tempX2; CUDA_CHECK(cudaMalloc(&KK, N * sizeof(float))); CUDA_CHECK(cudaMalloc(&tempX1, K * sizeof(float))); CUDA_CHECK(cudaMalloc(&tempX2, K * sizeof(float))); float kernel_gamma = cal_gamma_gpu(K, M, N, W, X, tempX1, tempX2); int SS = (S>(M-S)) ? 
S : M-S; for(int i = 0;i < SS;++i){ //random int s1 = rand() % S; int s2 = rand() % S; if(s1 == s2){ s2 = (s2 + 100) % S; } // b-test O(n2) //for(int j = 0; j < SS; ++j){ /*LOG(INFO) << "111111"; */ /*LOG(INFO) << "NNNNN " << N;*/ /*LOG(INFO) << "KKKKK " << K;*/ /*LOG(INFO) << "MMMMM " << M;*/ /*LOG(INFO) << "SSSSS " << S;*/ int t1 = rand() % (M - S); int t2 = rand() % (M - S); if(t1 == t2){ t2 = (t2 + 100) % (M - S); } t1 = t1 + S; t2 = t2 + S; /*LOG(INFO) << "NNNNN " << N;*/ /*LOG(INFO) << "KKKKK " << K;*/ /*LOG(INFO) << "MMMMM " << M;*/ /*LOG(INFO) << "SSSSS " << S;*/ const float *x_s1 = X + s1 * K; const float *x_s2 = X + s2 * K; const float *x_t1 = X + t1 * K; const float *x_t2 = X + t2 * K; const float tempS = 1.0; //LOG(INFO) << "22222222"; //calculate four items of MMD gradient caffe_gpu_memset(sizeof(float) * N, 0, KK); cal_add_item_gpu<float>(-1, N, K, W, x_s1, tempX1, x_s2, tempX2, kernel_gamma, KK); caffe_gpu_gemm<float>(CblasNoTrans, CblasTrans, N, K, 1, tempS * gamma, KK,tempX2, 1.0, delta_W); caffe_gpu_memset(sizeof(float) * N, 0, KK); cal_add_item_gpu<float>(1, N, K, W, x_s1, tempX1, x_t2, tempX2, kernel_gamma, KK); caffe_gpu_gemm<float>(CblasNoTrans, CblasTrans, N, K, 1, tempS * gamma, KK,tempX2, 1.0, delta_W); caffe_gpu_memset(sizeof(float) * N, 0, KK); cal_add_item_gpu<float>(1, N, K, W, x_s2, tempX1, x_t1, tempX2, kernel_gamma, KK); caffe_gpu_gemm<float>(CblasNoTrans, CblasTrans, N, K, 1, tempS * gamma, KK,tempX2, 1.0, delta_W); caffe_gpu_memset(sizeof(float) * N, 0, KK); cal_add_item_gpu<float>(-1, N, K, W, x_t1, tempX1, x_t2, tempX2, kernel_gamma, KK); caffe_gpu_gemm<float>(CblasNoTrans, CblasTrans, N, K, 1, tempS * gamma, KK,tempX2, 1.0, delta_W); } //output the value of delta_W after MMD gradient caffe_gpu_sign(N*K, delta_W, temp); caffe_gpu_dot(N*K, delta_W, temp, &sum); LOG(INFO) << "delta_W after MMD, sum = " << sum << ", average = " << sum / (N*K); CUDA_CHECK(cudaFree(temp)); CUDA_CHECK(cudaFree(KK)); CUDA_CHECK(cudaFree(tempX1)); CUDA_CHECK(cudaFree(tempX2)); } template<> void caffe_gpu_mmd<double>(const int N, const int K, const int M, const int S, const int labeledTargetSize, const double* W, const double* X, const double gamma, double* delta_W){ //TODO: complete the double version of this function } } // namespace caffe
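// A host-side sketch (not from the original source) restating the loop inside
// cal_add_item_gpu<float>: the coefficient it accumulates is
//   kernel = sum_{i=0..4} 2 * g_i * exp(-g_i * ||W*x1 - W*x2||^2),
// with g_0 = gamma/4 and g_{i+1} = 2*g_i, i.e. bandwidths gamma/4 ... 4*gamma.
// The helper name multi_kernel_coefficient is an illustrative assumption, not
// Caffe API.
#include <cmath>

inline float multi_kernel_coefficient(float gamma, float squared_distance) {
  float kernel = 0.0f;
  float g = gamma / 4.0f;            // smallest bandwidth in the schedule
  for (int i = 0; i < 5; ++i) {
    kernel += 2.0f * g * std::exp(-g * squared_distance);  // 2 * gamma_i * k_i
    g *= 2.0f;                                             // double the bandwidth
  }
  return kernel;                     // same value the device code calls "kernel"
}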
c6e4d5d1678bee82eddf0f21e92d2eaac0681b39.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/kmeans_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template<typename Dtype> __global__ void max_along_channel(const int nthreads, const int channels, const Dtype* distance, Dtype *pos, Dtype* max_value, const Dtype* label=NULL) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype maxval = -FLT_MAX; const Dtype* dist_data = distance + index*channels; if (label == NULL) { for (int ch = 0; ch < channels; ++ch) { if (maxval < dist_data[ch]) { maxval = dist_data[ch]; pos[index] = ch; } } max_value[index] = maxval; } else { pos[index] = label[index]; max_value[index] = dist_data[(int)label[index]]; } } } template <typename Dtype> void KmeansLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { //cluster_centroid_dist_layer->Forward(distance_bottom_vec_, distance_top_vec_); const Dtype* distance_data = bottom[0]->gpu_data(); //distance_.gpu_data(); const int nthreads = bottom[0]->num(); Dtype* label = NULL; if (bottom.size() > 1) label = bottom[1]->mutable_gpu_data(); max_along_channel<Dtype> << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(nthreads, bottom[0]->channels(), distance_data, pos_.mutable_gpu_data(), max_value_set_.mutable_gpu_data(), label ); Dtype loss = max_value_set_.asum_data(); top[0]->mutable_cpu_data()[0] = loss / nthreads; } template <typename Dtype> __global__ void kmeans_diff_bp(const int nthreads, const int channels, Dtype *distance_diff, const Dtype *pos, Dtype loss_weight) { CUDA_KERNEL_LOOP(index, nthreads) { int ch = pos[index]; distance_diff[index*channels + ch] = loss_weight; } } template <typename Dtype> void KmeansLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int nthreads = bottom[0]->num(); kmeans_diff_bp<Dtype> << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(nthreads, bottom[0]->channels(), //distance_.mutable_gpu_diff(), bottom[0]->mutable_gpu_diff(), pos_.gpu_data(), -top[0]->cpu_diff()[0]/nthreads); //cluster_centroid_dist_layer->Backward(distance_top_vec_, propagate_down, distance_bottom_vec_); } INSTANTIATE_LAYER_GPU_FUNCS(KmeansLossLayer); } // namespace caffe
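// A plain CPU sketch (not part of the file above) of what max_along_channel
// plus the asum in Forward_gpu compute, assuming `distance` is a row-major
// [num x channels] buffer.  The function name kmeans_forward_reference is
// illustrative only.
#include <algorithm>
#include <cfloat>
#include <vector>

float kmeans_forward_reference(const std::vector<float>& distance,
                               int num, int channels,
                               const std::vector<float>* label = nullptr) {
  float loss = 0.0f;
  for (int n = 0; n < num; ++n) {
    const float* dist = &distance[n * channels];
    float val;
    if (label == nullptr) {
      // Unlabeled case: take the maximum response over channels.
      val = -FLT_MAX;
      for (int ch = 0; ch < channels; ++ch) val = std::max(val, dist[ch]);
    } else {
      // Labeled case: take the response at the given label.
      val = dist[static_cast<int>((*label)[n])];
    }
    loss += val;
  }
  return loss / num;  // matches top[0] = asum(max_value_set_) / nthreads
}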
c6e4d5d1678bee82eddf0f21e92d2eaac0681b39.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/kmeans_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template<typename Dtype> __global__ void max_along_channel(const int nthreads, const int channels, const Dtype* distance, Dtype *pos, Dtype* max_value, const Dtype* label=NULL) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype maxval = -FLT_MAX; const Dtype* dist_data = distance + index*channels; if (label == NULL) { for (int ch = 0; ch < channels; ++ch) { if (maxval < dist_data[ch]) { maxval = dist_data[ch]; pos[index] = ch; } } max_value[index] = maxval; } else { pos[index] = label[index]; max_value[index] = dist_data[(int)label[index]]; } } } template <typename Dtype> void KmeansLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { //cluster_centroid_dist_layer->Forward(distance_bottom_vec_, distance_top_vec_); const Dtype* distance_data = bottom[0]->gpu_data(); //distance_.gpu_data(); const int nthreads = bottom[0]->num(); Dtype* label = NULL; if (bottom.size() > 1) label = bottom[1]->mutable_gpu_data(); max_along_channel<Dtype> << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(nthreads, bottom[0]->channels(), distance_data, pos_.mutable_gpu_data(), max_value_set_.mutable_gpu_data(), label ); Dtype loss = max_value_set_.asum_data(); top[0]->mutable_cpu_data()[0] = loss / nthreads; } template <typename Dtype> __global__ void kmeans_diff_bp(const int nthreads, const int channels, Dtype *distance_diff, const Dtype *pos, Dtype loss_weight) { CUDA_KERNEL_LOOP(index, nthreads) { int ch = pos[index]; distance_diff[index*channels + ch] = loss_weight; } } template <typename Dtype> void KmeansLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int nthreads = bottom[0]->num(); kmeans_diff_bp<Dtype> << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(nthreads, bottom[0]->channels(), //distance_.mutable_gpu_diff(), bottom[0]->mutable_gpu_diff(), pos_.gpu_data(), -top[0]->cpu_diff()[0]/nthreads); //cluster_centroid_dist_layer->Backward(distance_top_vec_, propagate_down, distance_bottom_vec_); } INSTANTIATE_LAYER_GPU_FUNCS(KmeansLossLayer); } // namespace caffe
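// A CPU sketch (not part of the file above) of the backward pass: the
// kmeans_diff_bp kernel scatters one value per sample into the argmax (or
// label) channel.  This restatement assumes the diff buffer is zeroed before
// the scatter, which the kernel itself does not do; kmeans_backward_reference
// is an illustrative name.
#include <algorithm>
#include <vector>

void kmeans_backward_reference(const std::vector<float>& pos,  // argmax per sample
                               float top_diff,                 // top[0]->cpu_diff()[0]
                               int num, int channels,
                               std::vector<float>* bottom_diff) {
  std::fill(bottom_diff->begin(), bottom_diff->end(), 0.0f);   // assumed pre-zeroing
  const float loss_weight = -top_diff / num;                   // matches the launch argument
  for (int n = 0; n < num; ++n) {
    const int ch = static_cast<int>(pos[n]);
    (*bottom_diff)[n * channels + ch] = loss_weight;
  }
}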
326f839e5310d49780946ae75dd9c08b25bce754.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "common.h" template <typename Dtype> __global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data, const Dtype* bottom_mask, const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { //index here indices the input pixels int c = (index / iwidth / iheight) % channels; int n = index / iwidth / iheight / channels; top_data += (n*channels + c)*oheight*owidth; int maxind = bottom_mask[index]-1; top_data[maxind] = bottom_data[index]; } } template <typename Dtype> __global__ void MaxUnpoolBackward(const int nthreads, const Dtype* top_diff, const Dtype* bottom_mask, const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int c = (index / iwidth / iheight) % channels; int n = index / iwidth / iheight / channels; top_diff += (n*channels + c)*oheight*owidth; int maxind = bottom_mask[index]-1; bottom_diff[index] = top_diff[maxind]; } } void THNN_CudaSpatialMaxUnpooling_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *indices, int owidth, int oheight) { THCUNN_assertSameGPU(state, 3, input, output, indices); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected"); long nInputCols, nInputRows, nInputPlane, batchSize; if (input->nDimension == 3) { nInputCols = input->size[2]; nInputRows = input->size[1]; nInputPlane = input->size[0]; batchSize = 1; } else { nInputCols = input->size[3]; nInputRows = input->size[2]; nInputPlane = input->size[1]; batchSize = input->size[0]; } input = THCudaTensor_newContiguous(state, input); indices = THCudaTensor_newContiguous(state, indices); THCudaTensor_resize4d(state, output, batchSize, nInputPlane, oheight, owidth); THCudaTensor_zero(state, output); int count = THCudaTensor_nElement(state, input); hipLaunchKernelGGL(( MaxUnpoolForward) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) , count, THCudaTensor_data(state, input), THCudaTensor_data(state, indices), batchSize, nInputPlane, nInputRows, nInputCols, oheight, owidth, THCudaTensor_data(state, output)); THCudaCheck(hipGetLastError()); if(input->nDimension == 3) THCudaTensor_resize3d(state, output, nInputPlane, oheight, owidth); THCudaTensor_free(state, input); } void THNN_CudaSpatialMaxUnpooling_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *indices, int owidth, int oheight) { THCUNN_assertSameGPU(state, 4, input, gradOutput, indices, gradInput); long nInputCols, nInputRows, nInputPlane, batchSize; if (input->nDimension == 3) { nInputCols = input->size[2]; nInputRows = input->size[1]; nInputPlane = input->size[0]; batchSize = 1; } else { nInputCols = input->size[3]; nInputRows = input->size[2]; nInputPlane = input->size[1]; batchSize = input->size[0]; } input = THCudaTensor_newContiguous(state, input); indices = THCudaTensor_newContiguous(state, indices); gradOutput = THCudaTensor_newContiguous(state, gradOutput); THCudaTensor_resizeAs(state, gradInput, input); int count = THCudaTensor_nElement(state, input); hipLaunchKernelGGL(( MaxUnpoolBackward) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) , count, THCudaTensor_data(state, gradOutput), 
THCudaTensor_data(state, indices), batchSize, nInputPlane, nInputRows, nInputCols, oheight, owidth, THCudaTensor_data(state, gradInput)); THCudaCheck(hipGetLastError()); // clean THCudaTensor_free(state, input); THCudaTensor_free(state, gradOutput); }
326f839e5310d49780946ae75dd9c08b25bce754.cu
#include "THCUNN.h" #include "common.h" template <typename Dtype> __global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data, const Dtype* bottom_mask, const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { //index here indices the input pixels int c = (index / iwidth / iheight) % channels; int n = index / iwidth / iheight / channels; top_data += (n*channels + c)*oheight*owidth; int maxind = bottom_mask[index]-1; top_data[maxind] = bottom_data[index]; } } template <typename Dtype> __global__ void MaxUnpoolBackward(const int nthreads, const Dtype* top_diff, const Dtype* bottom_mask, const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int c = (index / iwidth / iheight) % channels; int n = index / iwidth / iheight / channels; top_diff += (n*channels + c)*oheight*owidth; int maxind = bottom_mask[index]-1; bottom_diff[index] = top_diff[maxind]; } } void THNN_CudaSpatialMaxUnpooling_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *indices, int owidth, int oheight) { THCUNN_assertSameGPU(state, 3, input, output, indices); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected"); long nInputCols, nInputRows, nInputPlane, batchSize; if (input->nDimension == 3) { nInputCols = input->size[2]; nInputRows = input->size[1]; nInputPlane = input->size[0]; batchSize = 1; } else { nInputCols = input->size[3]; nInputRows = input->size[2]; nInputPlane = input->size[1]; batchSize = input->size[0]; } input = THCudaTensor_newContiguous(state, input); indices = THCudaTensor_newContiguous(state, indices); THCudaTensor_resize4d(state, output, batchSize, nInputPlane, oheight, owidth); THCudaTensor_zero(state, output); int count = THCudaTensor_nElement(state, input); MaxUnpoolForward <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>> (count, THCudaTensor_data(state, input), THCudaTensor_data(state, indices), batchSize, nInputPlane, nInputRows, nInputCols, oheight, owidth, THCudaTensor_data(state, output)); THCudaCheck(cudaGetLastError()); if(input->nDimension == 3) THCudaTensor_resize3d(state, output, nInputPlane, oheight, owidth); THCudaTensor_free(state, input); } void THNN_CudaSpatialMaxUnpooling_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *indices, int owidth, int oheight) { THCUNN_assertSameGPU(state, 4, input, gradOutput, indices, gradInput); long nInputCols, nInputRows, nInputPlane, batchSize; if (input->nDimension == 3) { nInputCols = input->size[2]; nInputRows = input->size[1]; nInputPlane = input->size[0]; batchSize = 1; } else { nInputCols = input->size[3]; nInputRows = input->size[2]; nInputPlane = input->size[1]; batchSize = input->size[0]; } input = THCudaTensor_newContiguous(state, input); indices = THCudaTensor_newContiguous(state, indices); gradOutput = THCudaTensor_newContiguous(state, gradOutput); THCudaTensor_resizeAs(state, gradInput, input); int count = THCudaTensor_nElement(state, input); MaxUnpoolBackward <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>> (count, THCudaTensor_data(state, gradOutput), THCudaTensor_data(state, indices), batchSize, nInputPlane, nInputRows, nInputCols, oheight, owidth, THCudaTensor_data(state, gradInput)); 
THCudaCheck(cudaGetLastError()); // clean THCudaTensor_free(state, input); THCudaTensor_free(state, gradOutput); }
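// A CPU sketch (not from the original source) of MaxUnpoolForward: each input
// element is copied to the output position named by its mask entry.  Note the
// mask is treated as 1-based (maxind = mask - 1) and each output plane starts
// at (n*channels + c)*oheight*owidth.  The name max_unpool_forward_reference
// is illustrative only.
#include <vector>

void max_unpool_forward_reference(const std::vector<float>& input,
                                  const std::vector<float>& mask,  // 1-based indices
                                  int num, int channels,
                                  int iheight, int iwidth,
                                  int oheight, int owidth,
                                  std::vector<float>* output) {
  output->assign(static_cast<size_t>(num) * channels * oheight * owidth, 0.0f);
  for (int index = 0; index < num * channels * iheight * iwidth; ++index) {
    const int c = (index / iwidth / iheight) % channels;
    const int n = index / iwidth / iheight / channels;
    const int plane_offset = (n * channels + c) * oheight * owidth;
    const int maxind = static_cast<int>(mask[index]) - 1;  // convert to 0-based
    (*output)[plane_offset + maxind] = input[index];
  }
}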
02fc11f89b400b0e51a2c465ad31c680ee0a2020.hip
// !!! This is a file automatically generated by hipify!!! /// LSU EE 4702-1 (Fall 2018), GPU Programming // /// Shared memory CUDA Example, without LSU ECE helper classes. /// References // // :ccpg10: CUDA C Programming Guide Version 10 // https://docs.nvidia.com/cuda/cuda-c-programming-guide // #if 0 /// Background /// Shared Address Space and Shared Memory // // References // General description: CUDA C Programming Guide Section 3.2.3 (v10) // Amount of SM: CUDA C Programming Guide Section Table 13 (Appendix H) // // // :Def: Shared Address Space // An address space provided by CUDA (through CC 7.x) in which: // // - Each block has its own address space. // // - Address space is shared by all threads in a block. // // - Locations can be read and written. // // - Size of space is 48 kiB in CC 2.X to CC 6.x. // Size of space is 96 kiB on CC 7.0 and 64 kiB on CC 7.5. // // - Shared address space uses shared memory. // // // :Def: Shared Memory // Hardware used to implement the shared address space. // // - Shared memory is part of SM, so no communication limits. // // - Amount of shared memory per SM (NOT per block) varies: // 48 kiB CC 2.0 - CC 3.5 // 112 kiB CC 3.7 // 64 kiB CC 5.0, 5.3, 6.0, 6.2, 7.5 // 96 kiB CC 5.2, 6.1, 7.0 // // - Low latency (fast). As low as 12 cycles. // // - High throughput. // // - Banked organization. Throughput depends on access patterns. // // /// Declaration and Use of Shared Memory // // - Declare variables using __shared__ qualifier. // // - Declaration can be at procedure or global scope. // // - Any type can be shared, including arrays. // // - Pointers to shared variables can be taken. // // :Example: Declaration examples. __shared__ int amount; __shared__ float4 forces[12]; /// Shared Memory Uses // // Communication between threads. // For example, to compute a block-wide sum. // // Caching of global memory. // (Copying to a place where it can be accessed quickly.) /// Barrier Synchronization (__syncthreads, __syncwarp) // // When a group is working together on a multi-step project .. // .. sometimes everyone must finish step x .. // .. before anyone can start on step x + 1. // // The same thing holds for threads. // // :Def: Barrier // A place in the execution of a code by a group of threads .. // .. which all must reach before any can exit. // // A barrier is like a room with two doors, the entrance and exit. // Initially the entrance is open and the exit is closed. // Threads enter the room but can't leave. (It's comfortable.) // When the last thread enters .. // .. the entrance closes .. // .. and the exit opens. // // // CUDA provides facilities that allow for threads within a block to // synchronize at places in the code. The simplest is __syncthreads: // /// __syncthreads // Implements a block-wide barrier. x = a + b; // Stuff before. __syncthreads(); y = c + d; // Stuff after. // // - Must be called by all (active) threads in a block. (Or by none at all.) // - No thread executes "Stuff after" until all threads call __syncthreads. // /// __syncwarp // Implements a warp-wide barrier. // // Uses of __syncwarp can be omitted in places where the compiler will // converge the threads in a warp. /// Atomic Operations // // :Def: Atomic Operation // An operation that appears to be either .. // .. completely finished or .. // .. not yet started. // An atomic operation NEVER appears to be partially done. // :Example: // // A the following "+=" operation is NOT atomic. 
// __shared__ int sum; if ( threadIdx.x == 0 ) sum = 0; if ( threadIdx.x == 40 ) sum += 40; if ( threadIdx.x == 70 ) sum += 70; // // An we need an atomic operation to perform the additions above. /// CUDA C Atomic Operations // // Reference: CUDA C Programming Guide B.12 // /// oldval = atomicAdd( valAddress, amount ) // Atomically add amount to *valAddress, return old value. /// Other Stuff // // Need to coordinate readers and writers. __syncthreads(); atomicAdd(POINTER, AMT); #endif #include <pthread.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <ctype.h> #include <time.h> #include <new> #include <hip/hip_runtime.h> #include <gp/cuda-gpuinfo.h> inline double time_fp() { struct timespec tp; clock_gettime(CLOCK_REALTIME,&tp); return ((double)tp.tv_sec)+((double)tp.tv_nsec) * 0.000000001; } #if 0 __global__ void cuda_thread_super_simple(int *output_data, int *input_data) { const int tid = threadIdx.x + blockIdx.x * blockDim.x; int my_element = input_data[tid]; /// Reasonable use of shared memory. // // Make thread 12's element available to all threads in the block. __shared__ int a; if ( threadIdx.x == 12 ) a = my_element; __syncthreads(); output_data[tid] = my_element + a; /// Bad use of shared memory. // // Everyone writes trouble. __shared__ int trouble; trouble = my_element; __syncthreads(); // All threads read the same value, whoever got there last. output_data[tid] = my_element + trouble; /// Reasonable use of shared memory. // // Share your array element with our neighbor. __shared__ int our_data[1024]; our_data[threadIdx.x] = my_element; __syncthreads(); output_data[tid] = my_element + our_data[threadIdx.x ^ 1]; /// Bad use of shared memory. // // Bank conflicts. __shared__ int another_array[512 * 32]; // All threads in a block will write to same bank. // This will slow down execution by a factor of 32. // another_array[ threadIdx.x * 32 ] = my_element; } #endif struct Vector { float a[4]; }; struct App { int num_threads; int array_size; Vector *v_in; float *m_out; Vector *d_v_in; float *d_m_out; float *block_mag_sum; float *d_block_mag_sum; }; // In host address space. App app; // In device constant address space. __constant__ App d_app; /// /// GPU Code (Kernel) /// __global__ void cuda_thread_start() { const int tid = threadIdx.x + blockIdx.x * blockDim.x; // Shared array, one element for each member of block (up to max bl size). __shared__ float our_mag_sums[1024]; float my_mag_sum = 0; for ( int h=tid; h<d_app.array_size; h += d_app.num_threads ) { Vector p = d_app.d_v_in[h]; float sos = 0; for ( int i=0; i<4; i++ ) sos += p.a[i] * p.a[i]; const float mag = sqrtf( sos ); // Write magnitude to global memory. d_app.d_m_out[h] = mag; // Compute this thread's magnitude sum. my_mag_sum += mag; } // Save this thread's magnitude sum in shared memory. // our_mag_sums[threadIdx.x] = my_mag_sum; // Wait for all threads to do this. // __syncthreads(); // All but the first warp are finished. // if ( threadIdx.x >= 32 ) return; // Threads in first warp (first 32) each compute sum for their lane. // float lane_mag_sum = 0; for ( int i=threadIdx.x; i<blockDim.x; i+=32 ) lane_mag_sum += our_mag_sums[i]; // Save the sum for this lane in shared memory. // our_mag_sums[threadIdx.x] = lane_mag_sum; // Have just thread 0 finish up. // if ( threadIdx.x != 0 ) return; // Compute the sum of the last 32 elements. // float block_mag_sum = 0; for ( int i=0; i<32; i++ ) block_mag_sum += our_mag_sums[i]; // Save this sum to global memory. 
CPU will sum of blocks. // d_app.d_block_mag_sum[blockIdx.x] = block_mag_sum; } /// /// Collect Information About GPU and Code /// void cuda_init() { GPU_Info gpu_info; gpu_info_print(); // Choose GPU 0 because we don't have time to provide a way to let // the user choose. // int dev = gpu_choose_index(); CE(hipSetDevice(dev)); printf("Using GPU %d\n",dev); gpu_info.get_gpu_info(dev); gpu_info.GET_INFO(cuda_thread_start); // Print information about time_step routine. // printf("\nCUDA Routine Resource Usage:\n"); for ( int i=0; i<gpu_info.num_kernels; i++ ) { printf("For %s:\n", gpu_info.ki[i].name); printf(" %6zd shared, %zd const, %zd loc, %d regs; " "%d max threads per block.\n", gpu_info.ki[i].cfa.sharedSizeBytes, gpu_info.ki[i].cfa.constSizeBytes, gpu_info.ki[i].cfa.localSizeBytes, gpu_info.ki[i].cfa.numRegs, gpu_info.ki[i].cfa.maxThreadsPerBlock); } printf("\n"); } /// /// Main Routine /// int main(int argc, char **argv) { const int threads_per_block = argc < 2 ? 1 : atoi(argv[1]); const int blocks_per_grid = argc < 3 ? 1 : atoi(argv[2]); app.num_threads = threads_per_block * blocks_per_grid; app.array_size = argc < 4 ? 1 << 20 : int( atof(argv[3]) * (1<<20) ); const int array_size_bytes = app.array_size * sizeof(app.v_in[0]); const int out_array_size_bytes = app.array_size * sizeof(app.m_out[0]); const int block_mag_sum_bytes = blocks_per_grid * sizeof(app.block_mag_sum[0]); if ( argc < 2 ) cuda_init(); // Allocate storage for CPU copy of data. // app.v_in = new Vector[app.array_size]; app.m_out = new float[app.array_size]; app.block_mag_sum = new float[blocks_per_grid]; // Allocate storage for GPU copy of data. // CE( hipMalloc( &app.d_v_in, array_size_bytes ) ); CE( hipMalloc( &app.d_m_out, out_array_size_bytes ) ); CE( hipMalloc( &app.d_block_mag_sum, block_mag_sum_bytes ) ); printf("Preparing for %d threads %d elements using %d blocks of size %d.\n", app.num_threads, app.array_size, blocks_per_grid, threads_per_block); // Initialize input array. // for ( int i=0; i<app.array_size; i++ ) for ( int j=0; j<4; j++ ) app.v_in[i].a[j] = drand48(); const double time_start = time_fp(); // Copy input array from CPU to GPU. // CE( hipMemcpy ( app.d_v_in, app.v_in, array_size_bytes, hipMemcpyHostToDevice ) ); // Copy App structure to GPU. // CE( hipMemcpyToSymbol ( d_app, &app, sizeof(app), 0, hipMemcpyHostToDevice ) ); /// Launch Kernel hipLaunchKernelGGL(( cuda_thread_start), dim3(blocks_per_grid),dim3(threads_per_block), 0, 0, ); // Copy output arrays from GPU to CPU. // CE( hipMemcpy ( app.m_out, app.d_m_out, out_array_size_bytes, hipMemcpyDeviceToHost) ); CE( hipMemcpy ( app.block_mag_sum, app.d_block_mag_sum, block_mag_sum_bytes, hipMemcpyDeviceToHost) ); float mag_sum = 0; for ( int i=0; i<blocks_per_grid; i++ ) mag_sum += app.block_mag_sum[i]; const double data_size = app.array_size * ( sizeof(Vector) + sizeof(float) ); const double fp_op_count = app.array_size * 5; const double elapsed_time = time_fp() - time_start; float mag_sum_check = 0; for ( int i=0; i<app.array_size; i++ ) mag_sum_check += app.m_out[i]; const float mag_avg_check = mag_sum_check / app.array_size; const float mag_avg = mag_sum / app.array_size; if ( fabs(mag_avg_check-mag_avg) > 0.00001 ) printf("** Averages don't check %.7f != %.7f (cpu)\n", mag_avg, mag_avg_check); printf("Elapsed time for %d threads and %d elements is %.3f s\n", app.num_threads, app.array_size, 1e6 * elapsed_time); printf("Rate %.3f GFLOPS, %.3f GB/s\n", 1e-9 * fp_op_count / elapsed_time, 1e-9 * data_size / elapsed_time); }
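// A device-code sketch (not part of the file above): the comments in this file
// mention atomicAdd, but cuda_thread_start leaves the final per-block sums for
// the CPU.  One common alternative, shown below, reduces within the block in
// shared memory and then has thread 0 atomically add the block's partial sum
// into a single global accumulator.  Illustrative only; assumes blockDim.x is
// a power of two no larger than 1024 and that *global_sum is zeroed before
// launch.  The device code is the same under CUDA and HIP.
__global__ void block_sum_atomic(const float* data, int n, float* global_sum) {
  __shared__ float partial[1024];
  const int tid = threadIdx.x + blockIdx.x * blockDim.x;
  float my_sum = 0;
  for (int i = tid; i < n; i += blockDim.x * gridDim.x) my_sum += data[i];
  partial[threadIdx.x] = my_sum;
  __syncthreads();
  // Tree reduction over the block's shared array.
  for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
    if (threadIdx.x < stride) partial[threadIdx.x] += partial[threadIdx.x + stride];
    __syncthreads();
  }
  if (threadIdx.x == 0) atomicAdd(global_sum, partial[0]);
}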
02fc11f89b400b0e51a2c465ad31c680ee0a2020.cu
/// LSU EE 4702-1 (Fall 2018), GPU Programming // /// Shared memory CUDA Example, without LSU ECE helper classes. /// References // // :ccpg10: CUDA C Programming Guide Version 10 // https://docs.nvidia.com/cuda/cuda-c-programming-guide // #if 0 /// Background /// Shared Address Space and Shared Memory // // References // General description: CUDA C Programming Guide Section 3.2.3 (v10) // Amount of SM: CUDA C Programming Guide Section Table 13 (Appendix H) // // // :Def: Shared Address Space // An address space provided by CUDA (through CC 7.x) in which: // // - Each block has its own address space. // // - Address space is shared by all threads in a block. // // - Locations can be read and written. // // - Size of space is 48 kiB in CC 2.X to CC 6.x. // Size of space is 96 kiB on CC 7.0 and 64 kiB on CC 7.5. // // - Shared address space uses shared memory. // // // :Def: Shared Memory // Hardware used to implement the shared address space. // // - Shared memory is part of SM, so no communication limits. // // - Amount of shared memory per SM (NOT per block) varies: // 48 kiB CC 2.0 - CC 3.5 // 112 kiB CC 3.7 // 64 kiB CC 5.0, 5.3, 6.0, 6.2, 7.5 // 96 kiB CC 5.2, 6.1, 7.0 // // - Low latency (fast). As low as 12 cycles. // // - High throughput. // // - Banked organization. Throughput depends on access patterns. // // /// Declaration and Use of Shared Memory // // - Declare variables using __shared__ qualifier. // // - Declaration can be at procedure or global scope. // // - Any type can be shared, including arrays. // // - Pointers to shared variables can be taken. // // :Example: Declaration examples. __shared__ int amount; __shared__ float4 forces[12]; /// Shared Memory Uses // // Communication between threads. // For example, to compute a block-wide sum. // // Caching of global memory. // (Copying to a place where it can be accessed quickly.) /// Barrier Synchronization (__syncthreads, __syncwarp) // // When a group is working together on a multi-step project .. // .. sometimes everyone must finish step x .. // .. before anyone can start on step x + 1. // // The same thing holds for threads. // // :Def: Barrier // A place in the execution of a code by a group of threads .. // .. which all must reach before any can exit. // // A barrier is like a room with two doors, the entrance and exit. // Initially the entrance is open and the exit is closed. // Threads enter the room but can't leave. (It's comfortable.) // When the last thread enters .. // .. the entrance closes .. // .. and the exit opens. // // // CUDA provides facilities that allow for threads within a block to // synchronize at places in the code. The simplest is __syncthreads: // /// __syncthreads // Implements a block-wide barrier. x = a + b; // Stuff before. __syncthreads(); y = c + d; // Stuff after. // // - Must be called by all (active) threads in a block. (Or by none at all.) // - No thread executes "Stuff after" until all threads call __syncthreads. // /// __syncwarp // Implements a warp-wide barrier. // // Uses of __syncwarp can be omitted in places where the compiler will // converge the threads in a warp. /// Atomic Operations // // :Def: Atomic Operation // An operation that appears to be either .. // .. completely finished or .. // .. not yet started. // An atomic operation NEVER appears to be partially done. // :Example: // // A the following "+=" operation is NOT atomic. 
// __shared__ int sum; if ( threadIdx.x == 0 ) sum = 0; if ( threadIdx.x == 40 ) sum += 40; if ( threadIdx.x == 70 ) sum += 70; // // An we need an atomic operation to perform the additions above. /// CUDA C Atomic Operations // // Reference: CUDA C Programming Guide B.12 // /// oldval = atomicAdd( valAddress, amount ) // Atomically add amount to *valAddress, return old value. /// Other Stuff // // Need to coordinate readers and writers. __syncthreads(); atomicAdd(POINTER, AMT); #endif #include <pthread.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <ctype.h> #include <time.h> #include <new> #include <cuda_runtime.h> #include <gp/cuda-gpuinfo.h> inline double time_fp() { struct timespec tp; clock_gettime(CLOCK_REALTIME,&tp); return ((double)tp.tv_sec)+((double)tp.tv_nsec) * 0.000000001; } #if 0 __global__ void cuda_thread_super_simple(int *output_data, int *input_data) { const int tid = threadIdx.x + blockIdx.x * blockDim.x; int my_element = input_data[tid]; /// Reasonable use of shared memory. // // Make thread 12's element available to all threads in the block. __shared__ int a; if ( threadIdx.x == 12 ) a = my_element; __syncthreads(); output_data[tid] = my_element + a; /// Bad use of shared memory. // // Everyone writes trouble. __shared__ int trouble; trouble = my_element; __syncthreads(); // All threads read the same value, whoever got there last. output_data[tid] = my_element + trouble; /// Reasonable use of shared memory. // // Share your array element with our neighbor. __shared__ int our_data[1024]; our_data[threadIdx.x] = my_element; __syncthreads(); output_data[tid] = my_element + our_data[threadIdx.x ^ 1]; /// Bad use of shared memory. // // Bank conflicts. __shared__ int another_array[512 * 32]; // All threads in a block will write to same bank. // This will slow down execution by a factor of 32. // another_array[ threadIdx.x * 32 ] = my_element; } #endif struct Vector { float a[4]; }; struct App { int num_threads; int array_size; Vector *v_in; float *m_out; Vector *d_v_in; float *d_m_out; float *block_mag_sum; float *d_block_mag_sum; }; // In host address space. App app; // In device constant address space. __constant__ App d_app; /// /// GPU Code (Kernel) /// __global__ void cuda_thread_start() { const int tid = threadIdx.x + blockIdx.x * blockDim.x; // Shared array, one element for each member of block (up to max bl size). __shared__ float our_mag_sums[1024]; float my_mag_sum = 0; for ( int h=tid; h<d_app.array_size; h += d_app.num_threads ) { Vector p = d_app.d_v_in[h]; float sos = 0; for ( int i=0; i<4; i++ ) sos += p.a[i] * p.a[i]; const float mag = sqrtf( sos ); // Write magnitude to global memory. d_app.d_m_out[h] = mag; // Compute this thread's magnitude sum. my_mag_sum += mag; } // Save this thread's magnitude sum in shared memory. // our_mag_sums[threadIdx.x] = my_mag_sum; // Wait for all threads to do this. // __syncthreads(); // All but the first warp are finished. // if ( threadIdx.x >= 32 ) return; // Threads in first warp (first 32) each compute sum for their lane. // float lane_mag_sum = 0; for ( int i=threadIdx.x; i<blockDim.x; i+=32 ) lane_mag_sum += our_mag_sums[i]; // Save the sum for this lane in shared memory. // our_mag_sums[threadIdx.x] = lane_mag_sum; // Have just thread 0 finish up. // if ( threadIdx.x != 0 ) return; // Compute the sum of the last 32 elements. // float block_mag_sum = 0; for ( int i=0; i<32; i++ ) block_mag_sum += our_mag_sums[i]; // Save this sum to global memory. 
CPU will sum of blocks. // d_app.d_block_mag_sum[blockIdx.x] = block_mag_sum; } /// /// Collect Information About GPU and Code /// void cuda_init() { GPU_Info gpu_info; gpu_info_print(); // Choose GPU 0 because we don't have time to provide a way to let // the user choose. // int dev = gpu_choose_index(); CE(cudaSetDevice(dev)); printf("Using GPU %d\n",dev); gpu_info.get_gpu_info(dev); gpu_info.GET_INFO(cuda_thread_start); // Print information about time_step routine. // printf("\nCUDA Routine Resource Usage:\n"); for ( int i=0; i<gpu_info.num_kernels; i++ ) { printf("For %s:\n", gpu_info.ki[i].name); printf(" %6zd shared, %zd const, %zd loc, %d regs; " "%d max threads per block.\n", gpu_info.ki[i].cfa.sharedSizeBytes, gpu_info.ki[i].cfa.constSizeBytes, gpu_info.ki[i].cfa.localSizeBytes, gpu_info.ki[i].cfa.numRegs, gpu_info.ki[i].cfa.maxThreadsPerBlock); } printf("\n"); } /// /// Main Routine /// int main(int argc, char **argv) { const int threads_per_block = argc < 2 ? 1 : atoi(argv[1]); const int blocks_per_grid = argc < 3 ? 1 : atoi(argv[2]); app.num_threads = threads_per_block * blocks_per_grid; app.array_size = argc < 4 ? 1 << 20 : int( atof(argv[3]) * (1<<20) ); const int array_size_bytes = app.array_size * sizeof(app.v_in[0]); const int out_array_size_bytes = app.array_size * sizeof(app.m_out[0]); const int block_mag_sum_bytes = blocks_per_grid * sizeof(app.block_mag_sum[0]); if ( argc < 2 ) cuda_init(); // Allocate storage for CPU copy of data. // app.v_in = new Vector[app.array_size]; app.m_out = new float[app.array_size]; app.block_mag_sum = new float[blocks_per_grid]; // Allocate storage for GPU copy of data. // CE( cudaMalloc( &app.d_v_in, array_size_bytes ) ); CE( cudaMalloc( &app.d_m_out, out_array_size_bytes ) ); CE( cudaMalloc( &app.d_block_mag_sum, block_mag_sum_bytes ) ); printf("Preparing for %d threads %d elements using %d blocks of size %d.\n", app.num_threads, app.array_size, blocks_per_grid, threads_per_block); // Initialize input array. // for ( int i=0; i<app.array_size; i++ ) for ( int j=0; j<4; j++ ) app.v_in[i].a[j] = drand48(); const double time_start = time_fp(); // Copy input array from CPU to GPU. // CE( cudaMemcpy ( app.d_v_in, app.v_in, array_size_bytes, cudaMemcpyHostToDevice ) ); // Copy App structure to GPU. // CE( cudaMemcpyToSymbol ( d_app, &app, sizeof(app), 0, cudaMemcpyHostToDevice ) ); /// Launch Kernel cuda_thread_start<<<blocks_per_grid,threads_per_block>>>(); // Copy output arrays from GPU to CPU. // CE( cudaMemcpy ( app.m_out, app.d_m_out, out_array_size_bytes, cudaMemcpyDeviceToHost) ); CE( cudaMemcpy ( app.block_mag_sum, app.d_block_mag_sum, block_mag_sum_bytes, cudaMemcpyDeviceToHost) ); float mag_sum = 0; for ( int i=0; i<blocks_per_grid; i++ ) mag_sum += app.block_mag_sum[i]; const double data_size = app.array_size * ( sizeof(Vector) + sizeof(float) ); const double fp_op_count = app.array_size * 5; const double elapsed_time = time_fp() - time_start; float mag_sum_check = 0; for ( int i=0; i<app.array_size; i++ ) mag_sum_check += app.m_out[i]; const float mag_avg_check = mag_sum_check / app.array_size; const float mag_avg = mag_sum / app.array_size; if ( fabs(mag_avg_check-mag_avg) > 0.00001 ) printf("** Averages don't check %.7f != %.7f (cpu)\n", mag_avg, mag_avg_check); printf("Elapsed time for %d threads and %d elements is %.3f µs\n", app.num_threads, app.array_size, 1e6 * elapsed_time); printf("Rate %.3f GFLOPS, %.3f GB/s\n", 1e-9 * fp_op_count / elapsed_time, 1e-9 * data_size / elapsed_time); }
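The notes at the top of the file above introduce shared memory, the __syncthreads barrier, and atomicAdd, and cuda_thread_start finishes its reduction by writing one partial sum per block that the CPU then adds up. The sketch below is an editor-added illustration of how the same per-block result could instead be accumulated with a single atomicAdd per block; it is not part of the original file. The kernel name block_mag_sum_atomic and the single-float output d_mag_sum are assumptions, blockDim.x is assumed to be a power of two no larger than 1024, and *d_mag_sum must be cleared to zero before launch.

__global__ void block_mag_sum_atomic(const float *d_m, float *d_mag_sum, int n)
{
  // Each thread accumulates its share of the already-computed magnitudes.
  const int tid = threadIdx.x + blockIdx.x * blockDim.x;
  float my_sum = 0;
  for ( int h = tid; h < n; h += blockDim.x * gridDim.x ) my_sum += d_m[h];

  // Tree reduction in shared memory; __syncthreads is the block-wide barrier
  // described in the notes above.
  __shared__ float sums[1024];
  sums[threadIdx.x] = my_sum;
  __syncthreads();
  for ( int s = blockDim.x / 2; s > 0; s >>= 1 )
    {
      if ( threadIdx.x < s ) sums[threadIdx.x] += sums[threadIdx.x + s];
      __syncthreads();
    }

  // One atomic add per block replaces the d_block_mag_sum array that the CPU sums.
  if ( threadIdx.x == 0 ) atomicAdd( d_mag_sum, sums[0] );
}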
fdb4f5e9b34389386bdd78f87a4266ae4df6db4b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //#include <iostream> //#include <time.h> //#include <float.h> //#include <hiprand/hiprand_kernel.h> //#include "vec3.h" //#include "ray.h" //#include "sphere.h" //#include "hitable_list.h" //#include "camera.h" //#include "material.h" // //// limited version of checkCudaErrors from helper_cuda.h in CUDA examples //#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) // //void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) { // if (result) { // std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " << // file << ":" << line << " '" << func << "' \n"; // // Make sure we call CUDA Device Reset before exiting // hipDeviceReset(); // exit(99); // } //} // //// Matching the C++ code would recurse enough into color() calls that //// it was blowing up the stack, so we have to turn this into a //// limited-depth loop instead. Later code in the book limits to a max //// depth of 50, so we adapt this a few chapters early on the GPU. //__device__ vec3 color(const ray& r, hitable **world, hiprandState_t *local_rand_state) { // ray cur_ray = r; // vec3 cur_attenuation = vec3(1.0, 1.0, 1.0); // for (int i = 0; i < 50; i++) { // hit_record rec; // if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) { // ray scattered; // vec3 attenuation; // if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, local_rand_state)) { // cur_attenuation *= attenuation; // cur_ray = scattered; // } // else { // return vec3(0.0, 0.0, 0.0); // } // } // else { // vec3 unit_direction = unit_vector(cur_ray.direction()); // float t = 0.5f*(unit_direction.y() + 1.0f); // vec3 c = (1.0f - t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0); // return cur_attenuation * c; // } // } // return vec3(0.0, 0.0, 0.0); // exceeded recursion //} // //__global__ void render_init(int max_x, int max_y, hiprandState_t *rand_state) { // int i = threadIdx.x + blockIdx.x * blockDim.x; // int j = threadIdx.y + blockIdx.y * blockDim.y; // if ((i >= max_x) || (j >= max_y)) return; // int pixel_index = j*max_x + i; // //Each thread gets same seed, a different sequence number, no offset // hiprand_init(1984, pixel_index, 0, &rand_state[pixel_index]); //} // //__global__ void render(vec3 *fb, int max_x, int max_y, int ns, camera **cam, hitable **world, hiprandState_t *rand_state) { // int i = threadIdx.x + blockIdx.x * blockDim.x; // int j = threadIdx.y + blockIdx.y * blockDim.y; // if ((i >= max_x) || (j >= max_y)) return; // int pixel_index = j*max_x + i; // hiprandState_t local_rand_state = rand_state[pixel_index]; // vec3 col(0, 0, 0); // for (int s = 0; s < ns; s++) { // float u = float(i + hiprand_uniform(&local_rand_state)) / float(max_x); // float v = float(j + hiprand_uniform(&local_rand_state)) / float(max_y); // ray r = (*cam)->get_ray(u, v); // col += color(r, world, &local_rand_state); // } // rand_state[pixel_index] = local_rand_state; // col /= float(ns); // col[0] = sqrt(col[0]); // col[1] = sqrt(col[1]); // col[2] = sqrt(col[2]); // fb[pixel_index] = col; //} // //__global__ void create_world(hitable **d_list, hitable **d_world, camera **d_camera, int nx, int ny) { // if (threadIdx.x == 0 && blockIdx.x == 0) { // d_list[0] = new sphere(vec3(0, 0, -1), 0.5, // new lambertian(vec3(0.1, 0.2, 0.5))); // d_list[1] = new sphere(vec3(0, -100.5, -1), 100, // new lambertian(vec3(0.8, 0.8, 0.0))); // d_list[2] = new sphere(vec3(1, 0, -1), 0.5, // new metal(vec3(0.8, 
0.6, 0.2), 0.0)); // d_list[3] = new sphere(vec3(-1, 0, -1), 0.5, // new dielectric(1.5)); // d_list[4] = new sphere(vec3(-1, 0, -1), -0.45, // new dielectric(1.5)); // *d_world = new hitable_list(d_list, 5); // *d_camera = new camera(vec3(-2, 2, 1), // vec3(0, 0, -1), // vec3(0, 1, 0), // 20.0, // float(nx) / float(ny)); // } //} // //__global__ void free_world(hitable **d_list, hitable **d_world, camera **d_camera) { // for (int i = 0; i < 5; i++) { // delete ((sphere *)d_list[i])->mat_ptr; // delete d_list[i]; // } // delete *d_world; // delete *d_camera; //} // //int main() { // int nx = 1200; // int ny = 600; // int ns = 100; // int tx = 8; // int ty = 8; // // std::cerr << "Rendering a " << nx << "x" << ny << " image with " << ns << " samples per pixel "; // std::cerr << "in " << tx << "x" << ty << " blocks.\n"; // // int num_pixels = nx*ny; // size_t fb_size = num_pixels * sizeof(vec3); // // // allocate FB // vec3 *fb; // checkCudaErrors(hipMallocManaged((void **)&fb, fb_size)); // // // allocate random state // hiprandState_t *d_rand_state; // checkCudaErrors(hipMalloc((void **)&d_rand_state, num_pixels * sizeof(hiprandState_t))); // // // make our world of hitables & the camera // hitable **d_list; // checkCudaErrors(hipMalloc((void **)&d_list, 5 * sizeof(hitable *))); // hitable **d_world; // checkCudaErrors(hipMalloc((void **)&d_world, sizeof(hitable *))); // camera **d_camera; // checkCudaErrors(hipMalloc((void **)&d_camera, sizeof(camera *))); // create_world << <1, 1 >> >(d_list, d_world, d_camera, nx, ny); // checkCudaErrors(hipGetLastError()); // checkCudaErrors(hipDeviceSynchronize()); // // clock_t start, stop; // start = clock(); // // Render our buffer // dim3 blocks(nx / tx + 1, ny / ty + 1); // dim3 threads(tx, ty); // render_init << <blocks, threads >> >(nx, ny, d_rand_state); // checkCudaErrors(hipGetLastError()); // checkCudaErrors(hipDeviceSynchronize()); // render << <blocks, threads >> >(fb, nx, ny, ns, d_camera, d_world, d_rand_state); // checkCudaErrors(hipGetLastError()); // checkCudaErrors(hipDeviceSynchronize()); // stop = clock(); // double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; // std::cerr << "took " << timer_seconds << " seconds.\n"; // // // Output FB as Image // std::cout << "P3\n" << nx << " " << ny << "\n255\n"; // for (int j = ny - 1; j >= 0; j--) { // for (int i = 0; i < nx; i++) { // size_t pixel_index = j*nx + i; // int ir = int(255.99*fb[pixel_index].r()); // int ig = int(255.99*fb[pixel_index].g()); // int ib = int(255.99*fb[pixel_index].b()); // std::cout << ir << " " << ig << " " << ib << "\n"; // } // } // // // clean up // checkCudaErrors(hipDeviceSynchronize()); // free_world << <1, 1 >> >(d_list, d_world, d_camera); // checkCudaErrors(hipGetLastError()); // checkCudaErrors(hipFree(d_camera)); // checkCudaErrors(hipFree(d_world)); // checkCudaErrors(hipFree(d_list)); // checkCudaErrors(hipFree(d_rand_state)); // checkCudaErrors(hipFree(fb)); // // hipDeviceReset(); //}
fdb4f5e9b34389386bdd78f87a4266ae4df6db4b.cu
//#include <iostream> //#include <time.h> //#include <float.h> //#include <curand_kernel.h> //#include "vec3.h" //#include "ray.h" //#include "sphere.h" //#include "hitable_list.h" //#include "camera.h" //#include "material.h" // //// limited version of checkCudaErrors from helper_cuda.h in CUDA examples //#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) // //void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) { // if (result) { // std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " << // file << ":" << line << " '" << func << "' \n"; // // Make sure we call CUDA Device Reset before exiting // cudaDeviceReset(); // exit(99); // } //} // //// Matching the C++ code would recurse enough into color() calls that //// it was blowing up the stack, so we have to turn this into a //// limited-depth loop instead. Later code in the book limits to a max //// depth of 50, so we adapt this a few chapters early on the GPU. //__device__ vec3 color(const ray& r, hitable **world, curandState *local_rand_state) { // ray cur_ray = r; // vec3 cur_attenuation = vec3(1.0, 1.0, 1.0); // for (int i = 0; i < 50; i++) { // hit_record rec; // if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) { // ray scattered; // vec3 attenuation; // if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, local_rand_state)) { // cur_attenuation *= attenuation; // cur_ray = scattered; // } // else { // return vec3(0.0, 0.0, 0.0); // } // } // else { // vec3 unit_direction = unit_vector(cur_ray.direction()); // float t = 0.5f*(unit_direction.y() + 1.0f); // vec3 c = (1.0f - t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0); // return cur_attenuation * c; // } // } // return vec3(0.0, 0.0, 0.0); // exceeded recursion //} // //__global__ void render_init(int max_x, int max_y, curandState *rand_state) { // int i = threadIdx.x + blockIdx.x * blockDim.x; // int j = threadIdx.y + blockIdx.y * blockDim.y; // if ((i >= max_x) || (j >= max_y)) return; // int pixel_index = j*max_x + i; // //Each thread gets same seed, a different sequence number, no offset // curand_init(1984, pixel_index, 0, &rand_state[pixel_index]); //} // //__global__ void render(vec3 *fb, int max_x, int max_y, int ns, camera **cam, hitable **world, curandState *rand_state) { // int i = threadIdx.x + blockIdx.x * blockDim.x; // int j = threadIdx.y + blockIdx.y * blockDim.y; // if ((i >= max_x) || (j >= max_y)) return; // int pixel_index = j*max_x + i; // curandState local_rand_state = rand_state[pixel_index]; // vec3 col(0, 0, 0); // for (int s = 0; s < ns; s++) { // float u = float(i + curand_uniform(&local_rand_state)) / float(max_x); // float v = float(j + curand_uniform(&local_rand_state)) / float(max_y); // ray r = (*cam)->get_ray(u, v); // col += color(r, world, &local_rand_state); // } // rand_state[pixel_index] = local_rand_state; // col /= float(ns); // col[0] = sqrt(col[0]); // col[1] = sqrt(col[1]); // col[2] = sqrt(col[2]); // fb[pixel_index] = col; //} // //__global__ void create_world(hitable **d_list, hitable **d_world, camera **d_camera, int nx, int ny) { // if (threadIdx.x == 0 && blockIdx.x == 0) { // d_list[0] = new sphere(vec3(0, 0, -1), 0.5, // new lambertian(vec3(0.1, 0.2, 0.5))); // d_list[1] = new sphere(vec3(0, -100.5, -1), 100, // new lambertian(vec3(0.8, 0.8, 0.0))); // d_list[2] = new sphere(vec3(1, 0, -1), 0.5, // new metal(vec3(0.8, 0.6, 0.2), 0.0)); // d_list[3] = new sphere(vec3(-1, 0, -1), 0.5, // new dielectric(1.5)); // d_list[4] = new 
sphere(vec3(-1, 0, -1), -0.45, // new dielectric(1.5)); // *d_world = new hitable_list(d_list, 5); // *d_camera = new camera(vec3(-2, 2, 1), // vec3(0, 0, -1), // vec3(0, 1, 0), // 20.0, // float(nx) / float(ny)); // } //} // //__global__ void free_world(hitable **d_list, hitable **d_world, camera **d_camera) { // for (int i = 0; i < 5; i++) { // delete ((sphere *)d_list[i])->mat_ptr; // delete d_list[i]; // } // delete *d_world; // delete *d_camera; //} // //int main() { // int nx = 1200; // int ny = 600; // int ns = 100; // int tx = 8; // int ty = 8; // // std::cerr << "Rendering a " << nx << "x" << ny << " image with " << ns << " samples per pixel "; // std::cerr << "in " << tx << "x" << ty << " blocks.\n"; // // int num_pixels = nx*ny; // size_t fb_size = num_pixels * sizeof(vec3); // // // allocate FB // vec3 *fb; // checkCudaErrors(cudaMallocManaged((void **)&fb, fb_size)); // // // allocate random state // curandState *d_rand_state; // checkCudaErrors(cudaMalloc((void **)&d_rand_state, num_pixels * sizeof(curandState))); // // // make our world of hitables & the camera // hitable **d_list; // checkCudaErrors(cudaMalloc((void **)&d_list, 5 * sizeof(hitable *))); // hitable **d_world; // checkCudaErrors(cudaMalloc((void **)&d_world, sizeof(hitable *))); // camera **d_camera; // checkCudaErrors(cudaMalloc((void **)&d_camera, sizeof(camera *))); // create_world << <1, 1 >> >(d_list, d_world, d_camera, nx, ny); // checkCudaErrors(cudaGetLastError()); // checkCudaErrors(cudaDeviceSynchronize()); // // clock_t start, stop; // start = clock(); // // Render our buffer // dim3 blocks(nx / tx + 1, ny / ty + 1); // dim3 threads(tx, ty); // render_init << <blocks, threads >> >(nx, ny, d_rand_state); // checkCudaErrors(cudaGetLastError()); // checkCudaErrors(cudaDeviceSynchronize()); // render << <blocks, threads >> >(fb, nx, ny, ns, d_camera, d_world, d_rand_state); // checkCudaErrors(cudaGetLastError()); // checkCudaErrors(cudaDeviceSynchronize()); // stop = clock(); // double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; // std::cerr << "took " << timer_seconds << " seconds.\n"; // // // Output FB as Image // std::cout << "P3\n" << nx << " " << ny << "\n255\n"; // for (int j = ny - 1; j >= 0; j--) { // for (int i = 0; i < nx; i++) { // size_t pixel_index = j*nx + i; // int ir = int(255.99*fb[pixel_index].r()); // int ig = int(255.99*fb[pixel_index].g()); // int ib = int(255.99*fb[pixel_index].b()); // std::cout << ir << " " << ig << " " << ib << "\n"; // } // } // // // clean up // checkCudaErrors(cudaDeviceSynchronize()); // free_world << <1, 1 >> >(d_list, d_world, d_camera); // checkCudaErrors(cudaGetLastError()); // checkCudaErrors(cudaFree(d_camera)); // checkCudaErrors(cudaFree(d_world)); // checkCudaErrors(cudaFree(d_list)); // checkCudaErrors(cudaFree(d_rand_state)); // checkCudaErrors(cudaFree(fb)); // // cudaDeviceReset(); //}
67b1297df1945671150e89b9cac9d025efa172b9.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime_api.h> #include "dummy.h" namespace other_ns { template<> void Dummy<::dali::GPUBackend>::RunImpl(::dali::DeviceWorkspace *ws, const int idx) { const auto &input = ws->Input<::dali::GPUBackend>(idx); auto &output = ws->Output<::dali::GPUBackend>(idx); output.set_type(input.type()); output.ResizeLike(input); CUDA_CALL(hipMemcpyAsync( output.raw_mutable_data(), input.raw_data(), input.nbytes(), hipMemcpyDeviceToDevice, ws->stream())); } } // namespace other_ns DALI_REGISTER_OPERATOR(CustomDummy, ::other_ns::Dummy<::dali::GPUBackend>, ::dali::GPU);
67b1297df1945671150e89b9cac9d025efa172b9.cu
#include <cuda_runtime_api.h> #include "dummy.h" namespace other_ns { template<> void Dummy<::dali::GPUBackend>::RunImpl(::dali::DeviceWorkspace *ws, const int idx) { const auto &input = ws->Input<::dali::GPUBackend>(idx); auto &output = ws->Output<::dali::GPUBackend>(idx); output.set_type(input.type()); output.ResizeLike(input); CUDA_CALL(cudaMemcpyAsync( output.raw_mutable_data(), input.raw_data(), input.nbytes(), cudaMemcpyDeviceToDevice, ws->stream())); } } // namespace other_ns DALI_REGISTER_OPERATOR(CustomDummy, ::other_ns::Dummy<::dali::GPUBackend>, ::dali::GPU);
351c66437f39ac9d8d37e6f029cbd931ebf64a06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "getValidForwardExtensionFromTheLastQ.h" __global__ void kernelPrintd_arr_V(struct_V *d_arr_V,int numberElementOf_d_arr_V){ int i = blockDim.x * blockIdx.x + threadIdx.x; if (i<numberElementOf_d_arr_V){ //if(d_arr_V[i].valid==1){ printf("\n Thread %d: valid: %d, d_backward: %d",i,d_arr_V[i].valid,d_arr_V[i].backward); //} } } __global__ void kernelCpyFromd_arr_V_to_d_arrValidV(struct_V *d_arr_V,int numberElementOf_d_arr_V,int *d_arrV){ int i = threadIdx.x + blockDim.x*blockIdx.x; if(i<numberElementOf_d_arr_V){ d_arrV[i]=d_arr_V[i].valid; } } __global__ void kernelFindValidForwardFromLastQ(struct_Q *device_arr_Q,int indexOfQ,cHistory **dH,int n,int *d_O,int *d_LO,int *d_N,int *d_LN,struct_V *d_arr_V,float *d_arr_degreeOfVerticesInQColumn, int maxOfVer,int m,Extension *d_arrE,int lastColumn){ int i = blockDim.x * blockIdx.x + threadIdx.x; if(i<n){ int minLabel = d_LO[device_arr_Q[0]._d_arr_Q[0].vid]; printf("\n minLabel: %d",minLabel); // diplay array dH /* //dH[i]->printmn(); printf("\n dh[%d]->m:%d",i,dH[i]->m); printf("\n dh[%d]->n:%d",i,dH[i]->n); for (int j = 0; j < dH[i]->n; j++) //display d_arr_HO { printf("\n dH[%d]->d_arr_HO[%d]:%d",i,j,dH[i]->d_arr_HO[j]); } for (int j = 0; j < dH[i]->m; j++) //display d_arr_HLN { printf("\n dH[%d]->d_arr_HLN[%d]:%d",i,j,dH[i]->d_arr_HLN[j]); } */ int vid = device_arr_Q[indexOfQ]._d_arr_Q[i].vid; //ly vid ca ct Q //int indexOfPrevQ = device_arr_Q[indexOfQ]._d_arr_Q[i].idx; //Tm thi khng ly index ca Q pha trc int degreeVid = __float2int_rn(d_arr_degreeOfVerticesInQColumn[i]); //ly bc ca vid , do bc l kiu float nn phi convert sang kiu int printf("\n Thread %d: vid:%d have degree: %d",i,vid,degreeVid); //Duyt qua cc nh k vi nh vid da vo s ln duyt l bc int indexToVidIndN=d_O[vid]; int labelFromVid = d_LO[vid]; int toVid; int labelToVid; for (int j = 0; j < degreeVid; j++,indexToVidIndN++) //Duyt qua tt c cc nh k vi nh vid, nu nh khng thuc embedding th --> cnh cng khng thuc embedding v y l Q cui { toVid=d_N[indexToVidIndN]; //Ly vid ca nh cn kim tra labelToVid = d_LO[toVid]; //ly label ca nh cn kim tra //printf("\nThread %d, j: %d has ToVidLabel:%d",i,j,labelToVid); //kim tra xem nh toVid tn ti trong embedding hay cha (khc zero l thuc embedding) int indexOfToVidInEmbedding=(toVid%maxOfVer); //printf("\n Thread %d, for j: %d, dH[%d]->d_arr_HO[%d]:%d",i,j,i,indexOfToVidInEmbedding,dH[i]->d_arr_HO[indexOfToVidInEmbedding]); if(dH[i]->d_arr_HO[indexOfToVidInEmbedding]==0){ //Nu gi tr tng ng trn Embedding bng zero th xt xem label ca n c tho ln hn hoc bng minLabel hay khng if(labelToVid>=minLabel){ //nu tho th s set mng V tng ng l 1 v ch nh n l forward int indexOfd_arr_V=i*m+j; int indexOfd_LN=indexToVidIndN+j; d_arr_V[indexOfd_arr_V].valid=1; //cp nht d liu cho mng d_arrE d_arrE[indexOfd_arr_V].vgi=vid; d_arrE[indexOfd_arr_V].vgj=toVid; d_arrE[indexOfd_arr_V].lij=d_LN[indexOfd_LN]; d_arrE[indexOfd_arr_V].li=labelFromVid; d_arrE[indexOfd_arr_V].lj=labelToVid; d_arrE[indexOfd_arr_V].vi=indexOfQ; d_arrE[indexOfd_arr_V].vj=indexOfQ+1; } } } } } __global__ void find_maximum_kernel(float *array, float *max, int *mutex, unsigned int n) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; unsigned int stride = gridDim.x*blockDim.x; unsigned int offset = 0; __shared__ float cache[256]; float temp = -1.0; while(index + offset < n){ temp = fmaxf(temp, array[index + offset]); offset += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = 
blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + i]); } __syncthreads(); i /= 2; } if(threadIdx.x == 0){ while(atomicCAS(mutex,0,1) != 0); //lock *max = fmaxf(*max, cache[0]); atomicExch(mutex, 0); //unlock } } __global__ void kernelFindDegreeOfVertex(int *d_O,int *d_N,int numberOfElementd_O,int numberOfElementd_N,struct_Q *device_arr_Q,int indexOfQ,int n,float *d_arr_degreeOfVerticesInQColumn,int maxOfVer){ int i = blockDim.x*blockIdx.x + threadIdx.x; if(i<n){ float degreeOfV =0; int nextVid; int graphid; int lastGraphId=(numberOfElementd_O-1)/maxOfVer; int vid =device_arr_Q[indexOfQ]._d_arr_Q[i].vid; if(d_O[vid]==-1){ printf("\ndevice_arr_Q is not correct, vertex id %vid is not exist in database"); return; } if (vid==numberOfElementd_O-1){ //nu nh y l nh cui cng trong d_O degreeOfV=numberOfElementd_N-d_O[vid]; //th bc ca nh vid chnh bng tng s cnh tr cho gi tr ca d_O[vid]. } else { nextVid = vid+1; //xt nh pha sau c khc 1 hay khng? graphid=vid/maxOfVer; if(d_O[nextVid]==-1 && graphid==lastGraphId){ degreeOfV=numberOfElementd_N-d_O[vid]; } else if(d_O[nextVid]==-1 && graphid!=lastGraphId){ nextVid=(graphid+1)*maxOfVer; degreeOfV=d_O[nextVid]-d_O[vid]; } else { degreeOfV=d_O[nextVid]-d_O[vid]; } } //printf("\nThread:%d : Degree of %d is %f",i,vid,degreeOfV); d_arr_degreeOfVerticesInQColumn[i]=degreeOfV; //printf("\nThread %d: d_arr_degreeOfVerticesInQColumn[%d]:%f",i,i,d_arr_degreeOfVerticesInQColumn[i]); } } hipError_t getValidForwardExtensionFromTheLastQ(Extension *&d_arrE,int &numberElement_d_arrE,struct_Q *device_arr_Q,int indexOfQ,cHistory **dH,int n,unsigned int maxOfVer,int *d_O,int *d_LO,int *d_N,int *d_LN,int numberOfElementd_O,int numberOfElementd_N){ hipError_t cudaStatus; dim3 block(1024); dim3 grid((n+block.x-1)/block.x); //1. Tm bc ln nht m ca cc vid thuc device_arr_Q[indexOfQ] ang xt. //1.1 Khi to mt mng s nguyn c kch thc bng s lng embedding float *d_arr_degreeOfVerticesInQColumn; cudaStatus = hipMalloc((void**)&d_arr_degreeOfVerticesInQColumn,n*sizeof(float)); if(cudaStatus!=hipSuccess){ fprintf(stderr,"\ncudaMalloc d_arr_degreeOfVerticeInQColumn failed"); goto Error; } else { hipMemset(d_arr_degreeOfVerticesInQColumn,0,n*sizeof(float)); } //1.2 Tnh bc ca cc nh vid trong Q column v lu vo d_arr_OfVerticeInQColumn hipLaunchKernelGGL(( kernelFindDegreeOfVertex), dim3(grid),dim3(block), 0, 0, d_O,d_N,numberOfElementd_O,numberOfElementd_N,device_arr_Q,indexOfQ,n,d_arr_degreeOfVerticesInQColumn,maxOfVer); hipDeviceSynchronize(); cudaStatus = hipGetLastError(); if(cudaStatus!=hipSuccess){ fprintf(stderr,"\ncudaDeviceSynchronize kernelFindDegreeOfVertex failed"); goto Error; } //2. 
Tm bc ln nht ca vid trong Q column chnh l tm gi tr ln nht trong mng d_arr_degreeOfVerticesInQColumn float *h_max; h_max = (float*)malloc(sizeof(float)); if(h_max==NULL){ printf("\nMalloc h_max failed"); exit(1); } float *d_max; int *d_mutex; cudaStatus=hipMalloc((void**)&d_max,sizeof(float)); if (cudaStatus!=hipSuccess){ fprintf(stderr,"\ncudaMalloc d_max failed"); goto Error; } else { hipMemset(d_max,0,sizeof(float)); } cudaStatus=hipMalloc((void**)&d_mutex,sizeof(int)); if (cudaStatus!=hipSuccess){ fprintf(stderr,"\ncudaMalloc d_mutex failed"); goto Error; } else { hipMemset(d_mutex,0,sizeof(int)); } dim3 gridSize = 256; dim3 blockSize = 256; hipLaunchKernelGGL(( find_maximum_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_arr_degreeOfVerticesInQColumn, d_max, d_mutex, n); hipDeviceSynchronize(); cudaStatus = hipGetLastError(); if(cudaStatus!=hipSuccess){ fprintf(stderr,"\ncudaDeviceSynchronize find_maximum_kernel failed"); goto Error; } // copy from device to host hipMemcpy(h_max, d_max, sizeof(float), hipMemcpyDeviceToHost); //report results int m = (int)(*h_max); //bc ln nht ca cc nh trong 1 ct Q printf("\nMax degree of vid in Q column is: %d",m); /* //3. To mng d_arr_V c kch thc: maxDegree_vid_Q * |Q| Lu , mng d_arr_V phi c dng cu trc th hin cnh m rng c hp l hay khng v l forward extension hay backward extension. struct struct_V { int valid; //default: 0, valid: 1 int backward; //default: 0- forward; backward: 1 } */ struct_V *d_arr_V; int numberElementOf_d_arr_V=m*n; cudaStatus=hipMalloc((void**)&d_arr_V,numberElementOf_d_arr_V*sizeof(struct_V)); if(cudaStatus!=hipSuccess){ fprintf(stderr,"\n hipMalloc d_arr_V failed"); goto Error; } else { hipMemset(d_arr_V,0,numberElementOf_d_arr_V*sizeof(struct_V)); } /* //4. Tm cc m rng ca vid v nh du nhng m rng hp l vo mng d_arr_V o Bc ca cc nh trong Q column c lu tr trong mng d_arr_degreeOfVerticesInQColumn--> chng ta khng cn tnh bc ca vid o cHistory c lu tr trong dH l mt cu trc gm mng d_HO v d_HLN cho bit cnh v nh thuc embedding o Thread th i s s dng cc phn t tng ng index_d_arr_V t [i*m,(i+1)*m - 1] o Mi ln lp bc ca vid th bin tm s tng ln 1 ch vng nh tng ng trn d_arr_V o Nu nh phi cng ca DFS_Code kt ni trc tip vi nh u tin ca DFS_Code th khng tn ti backward edge (ch ng trong n th v hng). */ //To mng d_arrE c s lng phn t bng vi mng d_arr_V lu vgi,vgj v nhn ca cnh m rng //Extension *d_arrE; d_arrE=nullptr; numberElement_d_arrE=numberElementOf_d_arr_V; cudaStatus = hipMalloc((void**)&d_arrE,numberElementOf_d_arr_V*sizeof(Extension)); if (cudaStatus!=hipSuccess){ fprintf(stderr,"\n hipMalloc d_arrE failed",cudaStatus); exit(1); } else { hipMemset(d_arrE,-1,numberElementOf_d_arr_V*sizeof(Extension)); } int lastColumn =1; //ch gn tm thi thi, ch n cn truyn t ngoi vo. hipLaunchKernelGGL(( kernelFindValidForwardFromLastQ), dim3(grid),dim3(block), 0, 0, device_arr_Q,indexOfQ,dH,n,d_O,d_LO,d_N,d_LN,d_arr_V,d_arr_degreeOfVerticesInQColumn,maxOfVer,m,d_arrE,lastColumn); hipDeviceSynchronize(); cudaStatus = hipGetLastError(); if(cudaStatus!=hipSuccess){ fprintf(stderr,"\ncudaDeviceSynchronize kernelFindValidForwardFromLastQ failed"); goto Error; } //Hin th kt qu mng d_arr_V vi s lng phn t numberElementOf_d_arr_V /*kernelPrintd_arr_V<<<1,numberElementOf_d_arr_V>>>(d_arr_V,numberElementOf_d_arr_V); hipDeviceSynchronize(); printfExtension(d_arrE,numberElementOf_d_arr_V); hipDeviceSynchronize();*/ //Scan mng d_arr_V thu c mng index d_arr_V_scanResult. T mng ny chng ta c c s to mng d_Ext chp kt qu t d_arr_E b vo d_Ext. //1. 
Trc tin, cn chp cc phn t valid t mng d_arr_V sang mng s nguyn d_arrValidV; int *d_arrValidV; cudaStatus = hipMalloc((void**)&d_arrValidV,numberElementOf_d_arr_V*sizeof(int)); if(cudaStatus!=hipSuccess){ fprintf(stderr,"\n hipMalloc d_arrValidV failed",cudaStatus); exit(1); } else { hipMemset(d_arrValidV,0,numberElementOf_d_arr_V*sizeof(int)); } dim3 blockb(512); dim3 gridb((numberElementOf_d_arr_V+blockb.x-1)/blockb.x); hipLaunchKernelGGL(( kernelCpyFromd_arr_V_to_d_arrValidV), dim3(gridb),dim3(blockb), 0, 0, d_arr_V,numberElementOf_d_arr_V,d_arrValidV); hipDeviceSynchronize(); int * d_arrValidV_scanResult; cudaStatus = hipMalloc((void**)&d_arrValidV_scanResult,numberElementOf_d_arr_V*sizeof(int)); if(cudaStatus!=hipSuccess){ fprintf(stderr,"\n hipMalloc d_arrValidV failed",cudaStatus); exit(1); } scanV(d_arrValidV,numberElementOf_d_arr_V,d_arrValidV_scanResult); //Hin th kt qu d_arrValidV_scanResult printInt(d_arrValidV_scanResult,numberElementOf_d_arr_V); hipDeviceSynchronize(); cudaStatus = hipGetLastError(); if(cudaStatus!=hipSuccess){ fprintf(stderr,"\ncudaDeviceSynchronize getValidExtensionFromEmbedding failed"); goto Error; } Error: return cudaStatus; }
351c66437f39ac9d8d37e6f029cbd931ebf64a06.cu
#include "getValidForwardExtensionFromTheLastQ.h" __global__ void kernelPrintd_arr_V(struct_V *d_arr_V,int numberElementOf_d_arr_V){ int i = blockDim.x * blockIdx.x + threadIdx.x; if (i<numberElementOf_d_arr_V){ //if(d_arr_V[i].valid==1){ printf("\n Thread %d: valid: %d, d_backward: %d",i,d_arr_V[i].valid,d_arr_V[i].backward); //} } } __global__ void kernelCpyFromd_arr_V_to_d_arrValidV(struct_V *d_arr_V,int numberElementOf_d_arr_V,int *d_arrV){ int i = threadIdx.x + blockDim.x*blockIdx.x; if(i<numberElementOf_d_arr_V){ d_arrV[i]=d_arr_V[i].valid; } } __global__ void kernelFindValidForwardFromLastQ(struct_Q *device_arr_Q,int indexOfQ,cHistory **dH,int n,int *d_O,int *d_LO,int *d_N,int *d_LN,struct_V *d_arr_V,float *d_arr_degreeOfVerticesInQColumn, int maxOfVer,int m,Extension *d_arrE,int lastColumn){ int i = blockDim.x * blockIdx.x + threadIdx.x; if(i<n){ int minLabel = d_LO[device_arr_Q[0]._d_arr_Q[0].vid]; printf("\n minLabel: %d",minLabel); // diplay array dH /* //dH[i]->printmn(); printf("\n dh[%d]->m:%d",i,dH[i]->m); printf("\n dh[%d]->n:%d",i,dH[i]->n); for (int j = 0; j < dH[i]->n; j++) //display d_arr_HO { printf("\n dH[%d]->d_arr_HO[%d]:%d",i,j,dH[i]->d_arr_HO[j]); } for (int j = 0; j < dH[i]->m; j++) //display d_arr_HLN { printf("\n dH[%d]->d_arr_HLN[%d]:%d",i,j,dH[i]->d_arr_HLN[j]); } */ int vid = device_arr_Q[indexOfQ]._d_arr_Q[i].vid; //lấy vid của cột Q //int indexOfPrevQ = device_arr_Q[indexOfQ]._d_arr_Q[i].idx; //Tạm thời không lấy index của Q phía trước int degreeVid = __float2int_rn(d_arr_degreeOfVerticesInQColumn[i]); //lấy bậc của vid đó, do bậc là kiểu float nên phải convert sang kiểu int printf("\n Thread %d: vid:%d have degree: %d",i,vid,degreeVid); //Duyệt qua các đỉnh kề với đỉnh vid dựa vào số lần duyệt là bậc int indexToVidIndN=d_O[vid]; int labelFromVid = d_LO[vid]; int toVid; int labelToVid; for (int j = 0; j < degreeVid; j++,indexToVidIndN++) //Duyệt qua tất cả các đỉnh kề với đỉnh vid, nếu đỉnh không thuộc embedding thì --> cạnh cũng không thuộc embedding vì đây là Q cuối { toVid=d_N[indexToVidIndN]; //Lấy vid của đỉnh cần kiểm tra labelToVid = d_LO[toVid]; //lấy label của đỉnh cần kiểm tra //printf("\nThread %d, j: %d has ToVidLabel:%d",i,j,labelToVid); //kiểm tra xem đỉnh toVid đã tồn tại trong embedding hay chưa (khác zero là thuộc embedding) int indexOfToVidInEmbedding=(toVid%maxOfVer); //printf("\n Thread %d, for j: %d, dH[%d]->d_arr_HO[%d]:%d",i,j,i,indexOfToVidInEmbedding,dH[i]->d_arr_HO[indexOfToVidInEmbedding]); if(dH[i]->d_arr_HO[indexOfToVidInEmbedding]==0){ //Nếu giá trị tương ứng trên Embedding bằng zero thì xét xem label của nó có thoả lớn hơn hoặc bằng minLabel hay không if(labelToVid>=minLabel){ //nếu thoả thì sẽ set mảng V tương ứng là 1 và chỉ định nó là forward int indexOfd_arr_V=i*m+j; int indexOfd_LN=indexToVidIndN+j; d_arr_V[indexOfd_arr_V].valid=1; //cập nhật dữ liệu cho mảng d_arrE d_arrE[indexOfd_arr_V].vgi=vid; d_arrE[indexOfd_arr_V].vgj=toVid; d_arrE[indexOfd_arr_V].lij=d_LN[indexOfd_LN]; d_arrE[indexOfd_arr_V].li=labelFromVid; d_arrE[indexOfd_arr_V].lj=labelToVid; d_arrE[indexOfd_arr_V].vi=indexOfQ; d_arrE[indexOfd_arr_V].vj=indexOfQ+1; } } } } } __global__ void find_maximum_kernel(float *array, float *max, int *mutex, unsigned int n) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; unsigned int stride = gridDim.x*blockDim.x; unsigned int offset = 0; __shared__ float cache[256]; float temp = -1.0; while(index + offset < n){ temp = fmaxf(temp, array[index + offset]); offset += stride; } cache[threadIdx.x] = temp; 
__syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + i]); } __syncthreads(); i /= 2; } if(threadIdx.x == 0){ while(atomicCAS(mutex,0,1) != 0); //lock *max = fmaxf(*max, cache[0]); atomicExch(mutex, 0); //unlock } } __global__ void kernelFindDegreeOfVertex(int *d_O,int *d_N,int numberOfElementd_O,int numberOfElementd_N,struct_Q *device_arr_Q,int indexOfQ,int n,float *d_arr_degreeOfVerticesInQColumn,int maxOfVer){ int i = blockDim.x*blockIdx.x + threadIdx.x; if(i<n){ float degreeOfV =0; int nextVid; int graphid; int lastGraphId=(numberOfElementd_O-1)/maxOfVer; int vid =device_arr_Q[indexOfQ]._d_arr_Q[i].vid; if(d_O[vid]==-1){ printf("\ndevice_arr_Q is not correct, vertex id %vid is not exist in database"); return; } if (vid==numberOfElementd_O-1){ //nếu như đây là đỉnh cuối cùng trong d_O degreeOfV=numberOfElementd_N-d_O[vid]; //thì bậc của đỉnh vid chính bằng tổng số cạnh trừ cho giá trị của d_O[vid]. } else { nextVid = vid+1; //xét đỉnh phía sau có khác 1 hay không? graphid=vid/maxOfVer; if(d_O[nextVid]==-1 && graphid==lastGraphId){ degreeOfV=numberOfElementd_N-d_O[vid]; } else if(d_O[nextVid]==-1 && graphid!=lastGraphId){ nextVid=(graphid+1)*maxOfVer; degreeOfV=d_O[nextVid]-d_O[vid]; } else { degreeOfV=d_O[nextVid]-d_O[vid]; } } //printf("\nThread:%d : Degree of %d is %f",i,vid,degreeOfV); d_arr_degreeOfVerticesInQColumn[i]=degreeOfV; //printf("\nThread %d: d_arr_degreeOfVerticesInQColumn[%d]:%f",i,i,d_arr_degreeOfVerticesInQColumn[i]); } } cudaError_t getValidForwardExtensionFromTheLastQ(Extension *&d_arrE,int &numberElement_d_arrE,struct_Q *device_arr_Q,int indexOfQ,cHistory **dH,int n,unsigned int maxOfVer,int *d_O,int *d_LO,int *d_N,int *d_LN,int numberOfElementd_O,int numberOfElementd_N){ cudaError_t cudaStatus; dim3 block(1024); dim3 grid((n+block.x-1)/block.x); //1. Tìm bậc lớn nhất m của các vid thuộc device_arr_Q[indexOfQ] đang xét. //1.1 Khởi tạo một mảng số nguyên có kích thước bằng số lượng embedding float *d_arr_degreeOfVerticesInQColumn; cudaStatus = cudaMalloc((void**)&d_arr_degreeOfVerticesInQColumn,n*sizeof(float)); if(cudaStatus!=cudaSuccess){ fprintf(stderr,"\ncudaMalloc d_arr_degreeOfVerticeInQColumn failed"); goto Error; } else { cudaMemset(d_arr_degreeOfVerticesInQColumn,0,n*sizeof(float)); } //1.2 Tính bậc của các đỉnh vid trong Q column và lưu vào d_arr_OfVerticeInQColumn kernelFindDegreeOfVertex<<<grid,block>>>(d_O,d_N,numberOfElementd_O,numberOfElementd_N,device_arr_Q,indexOfQ,n,d_arr_degreeOfVerticesInQColumn,maxOfVer); cudaDeviceSynchronize(); cudaStatus = cudaGetLastError(); if(cudaStatus!=cudaSuccess){ fprintf(stderr,"\ncudaDeviceSynchronize kernelFindDegreeOfVertex failed"); goto Error; } //2. 
Tìm bậc lớn nhất của vid trong Q column chính là tìm giá trị lớn nhất trong mảng d_arr_degreeOfVerticesInQColumn float *h_max; h_max = (float*)malloc(sizeof(float)); if(h_max==NULL){ printf("\nMalloc h_max failed"); exit(1); } float *d_max; int *d_mutex; cudaStatus=cudaMalloc((void**)&d_max,sizeof(float)); if (cudaStatus!=cudaSuccess){ fprintf(stderr,"\ncudaMalloc d_max failed"); goto Error; } else { cudaMemset(d_max,0,sizeof(float)); } cudaStatus=cudaMalloc((void**)&d_mutex,sizeof(int)); if (cudaStatus!=cudaSuccess){ fprintf(stderr,"\ncudaMalloc d_mutex failed"); goto Error; } else { cudaMemset(d_mutex,0,sizeof(int)); } dim3 gridSize = 256; dim3 blockSize = 256; find_maximum_kernel<<<gridSize, blockSize>>>(d_arr_degreeOfVerticesInQColumn, d_max, d_mutex, n); cudaDeviceSynchronize(); cudaStatus = cudaGetLastError(); if(cudaStatus!=cudaSuccess){ fprintf(stderr,"\ncudaDeviceSynchronize find_maximum_kernel failed"); goto Error; } // copy from device to host cudaMemcpy(h_max, d_max, sizeof(float), cudaMemcpyDeviceToHost); //report results int m = (int)(*h_max); //bậc lớn nhất của các đỉnh trong 1 cột Q printf("\nMax degree of vid in Q column is: %d",m); /* //3. Tạo mảng d_arr_V có kích thước: maxDegree_vid_Q * |Q| Lưu ý, mảng d_arr_V phải có dạng cấu trúc đủ thể hiện cạnh mở rộng có hợp lệ hay không và là forward extension hay backward extension. struct struct_V { int valid; //default: 0, valid: 1 int backward; //default: 0- forward; backward: 1 } */ struct_V *d_arr_V; int numberElementOf_d_arr_V=m*n; cudaStatus=cudaMalloc((void**)&d_arr_V,numberElementOf_d_arr_V*sizeof(struct_V)); if(cudaStatus!=cudaSuccess){ fprintf(stderr,"\n cudaMalloc d_arr_V failed"); goto Error; } else { cudaMemset(d_arr_V,0,numberElementOf_d_arr_V*sizeof(struct_V)); } /* //4. Tìm các mở rộng của vid và đánh dấu những mở rộng hợp lệ vào mảng d_arr_V o Bậc của các đỉnh trong Q column được lưu trữ trong mảng d_arr_degreeOfVerticesInQColumn--> chúng ta không cần tính bậc của vid o cHistory được lưu trữ trong dH là một cấu trúc gồm mảng d_HO và d_HLN cho biết cạnh và đỉnh đã thuộc embedding o Thread thứ i sẽ sử dụng các phần tử tương ứng index_d_arr_V từ [i*m,(i+1)*m - 1] o Mỗi lần lặp bậc của vid thì biến tạm sẽ tăng lên 1 để chỉ vùng nhớ tương ứng trên d_arr_V o Nếu đỉnh phải cùng của DFS_Code kết nối trực tiếp với đỉnh đầu tiên của DFS_Code thì không tồn tại backward edge (chỉ đúng trong đơn đồ thị vô hướng). */ //Tạo mảng d_arrE có số lượng phần tử bằng với mảng d_arr_V để lưu vgi,vgj và nhãn của cạnh mở rộng //Extension *d_arrE; d_arrE=nullptr; numberElement_d_arrE=numberElementOf_d_arr_V; cudaStatus = cudaMalloc((void**)&d_arrE,numberElementOf_d_arr_V*sizeof(Extension)); if (cudaStatus!=cudaSuccess){ fprintf(stderr,"\n cudaMalloc d_arrE failed",cudaStatus); exit(1); } else { cudaMemset(d_arrE,-1,numberElementOf_d_arr_V*sizeof(Extension)); } int lastColumn =1; //chỉ gán tạm thời thôi, chứ nó cần truyền từ ngoài vào. 
kernelFindValidForwardFromLastQ<<<grid,block>>>(device_arr_Q,indexOfQ,dH,n,d_O,d_LO,d_N,d_LN,d_arr_V,d_arr_degreeOfVerticesInQColumn,maxOfVer,m,d_arrE,lastColumn); cudaDeviceSynchronize(); cudaStatus = cudaGetLastError(); if(cudaStatus!=cudaSuccess){ fprintf(stderr,"\ncudaDeviceSynchronize kernelFindValidForwardFromLastQ failed"); goto Error; } //Hiển thị kết quả mảng d_arr_V với số lượng phần tử numberElementOf_d_arr_V /*kernelPrintd_arr_V<<<1,numberElementOf_d_arr_V>>>(d_arr_V,numberElementOf_d_arr_V); cudaDeviceSynchronize(); printfExtension(d_arrE,numberElementOf_d_arr_V); cudaDeviceSynchronize();*/ //Scan mảng d_arr_V để thu được mảng index d_arr_V_scanResult. Từ mảng này chúng ta có cơ sở để tạo mảng d_Ext để chép kết quả từ d_arr_E bỏ vào d_Ext. //1. Trước tiên, cần chép các phần tử valid từ mảng d_arr_V sang mảng số nguyên d_arrValidV; int *d_arrValidV; cudaStatus = cudaMalloc((void**)&d_arrValidV,numberElementOf_d_arr_V*sizeof(int)); if(cudaStatus!=cudaSuccess){ fprintf(stderr,"\n cudaMalloc d_arrValidV failed",cudaStatus); exit(1); } else { cudaMemset(d_arrValidV,0,numberElementOf_d_arr_V*sizeof(int)); } dim3 blockb(512); dim3 gridb((numberElementOf_d_arr_V+blockb.x-1)/blockb.x); kernelCpyFromd_arr_V_to_d_arrValidV<<<gridb,blockb>>>(d_arr_V,numberElementOf_d_arr_V,d_arrValidV); cudaDeviceSynchronize(); int * d_arrValidV_scanResult; cudaStatus = cudaMalloc((void**)&d_arrValidV_scanResult,numberElementOf_d_arr_V*sizeof(int)); if(cudaStatus!=cudaSuccess){ fprintf(stderr,"\n cudaMalloc d_arrValidV failed",cudaStatus); exit(1); } scanV(d_arrValidV,numberElementOf_d_arr_V,d_arrValidV_scanResult); //Hiển thị kết quả d_arrValidV_scanResult printInt(d_arrValidV_scanResult,numberElementOf_d_arr_V); cudaDeviceSynchronize(); cudaStatus = cudaGetLastError(); if(cudaStatus!=cudaSuccess){ fprintf(stderr,"\ncudaDeviceSynchronize getValidExtensionFromEmbedding failed"); goto Error; } Error: return cudaStatus; }
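The routine above copies the valid flags out of d_arr_V into d_arrValidV and runs scanV over them to produce d_arrValidV_scanResult, which is the usual prefix-sum setup for stream compaction. As a reading aid, here is a minimal editor-added sketch of the scatter step that such a scan enables; it assumes scanV computes an exclusive scan (with an inclusive scan the destination index would be scanned[i] - 1). The kernel name and the d_Ext output array are illustrative assumptions, not identifiers from the original project, while Extension is the project's own type.

__global__ void kernelCompactExtensions(const Extension *d_arrE, const int *flags,
                                        const int *scanned, Extension *d_Ext, int n)
{
  // flags corresponds to d_arrValidV, scanned to d_arrValidV_scanResult.
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if ( i >= n ) return;
  if ( flags[i] )                    // element i is a valid forward extension
    d_Ext[scanned[i]] = d_arrE[i];   // the exclusive scan gives its compacted slot
}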
3938f9625274d6ab94a626e46b47a2dc44e61b3a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> // Kernel function to multiply the elements of two arrays element-wise (kernel is still named add) __global__ void add(int n, float *x, float *y) { for (int i = 0; i < n; i++) y[i] = x[i] * y[i]; } int main(void) { int N = 1<<20; float *x, *y; // Allocate Unified Memory accessible from CPU or GPU hipMallocManaged(&x, N*sizeof(float)); hipMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 4.5f; } // Run kernel on 1M elements on the GPU hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, N, x, y); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Check for errors (each y[i] should be 1.0f * 4.5f = 4.5f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 4.5f)); std::cout << "Max error: " << maxError << std::endl; // Free memory hipFree(x); hipFree(y); return 0; }
3938f9625274d6ab94a626e46b47a2dc44e61b3a.cu
#include <iostream> #include <math.h> // Kernel function to multiply the elements of two arrays element-wise (kernel is still named add) __global__ void add(int n, float *x, float *y) { for (int i = 0; i < n; i++) y[i] = x[i] * y[i]; } int main(void) { int N = 1<<20; float *x, *y; // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&x, N*sizeof(float)); cudaMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 4.5f; } // Run kernel on 1M elements on the GPU add<<<1, 1>>>(N, x, y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Check for errors (each y[i] should be 1.0f * 4.5f = 4.5f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 4.5f)); std::cout << "Max error: " << maxError << std::endl; // Free memory cudaFree(x); cudaFree(y); return 0; }
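Both versions above launch the kernel with a single thread (<<<1, 1>>> in the CUDA file), so the loop over all N elements runs serially on one GPU thread. As a hedged illustration of the usual next step, and not part of the original sample, the variant below spreads the same element-wise multiply over many threads with a grid-stride loop; the kernel name multiply_parallel and the launch parameters are assumptions.

// Editor-added sketch: grid-stride parallel version of the element-wise multiply above.
__global__ void multiply_parallel(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
    y[i] = x[i] * y[i];
}

// Possible launch, keeping the rest of main() unchanged:
//   int blockSize = 256;
//   int numBlocks = (N + blockSize - 1) / blockSize;
//   multiply_parallel<<<numBlocks, blockSize>>>(N, x, y);
//   cudaDeviceSynchronize();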
94daf5b7dcf87af005a16c074a1984936207217c.hip
// !!! This is a file automatically generated by hipify!!! #include "header.h" #include <iostream> #include <string> #include <fstream> #include<stdlib.h> #include <stdio.h> #include<time.h> #include<hip/device_functions.h> #include<cuda.h> #include<math.h> using namespace std; void find_max(){ } void read_by_type(float *mnist_img, int *mnist_label, float *output_array, int type, int total_num, int *result_size){ int count = 0; int total_pixel = 28*28; for(int i=0;i<total_num;i++){ if (mnist_label[i]==type){ for(int j=0;j<total_pixel;j++){ output_array[count*total_pixel+j] = mnist_img[i*total_pixel+j]; } count ++; } } result_size[0] = count; printf("number_of_%d_img_is:%d\n",type, count); } void MNIST_labeling(string input_file_starter, int size, float *input_array_1, int *input_array_2, float *output_array_1, int *output_array_2, int main_neuron_num, int function_select, int function_select_2){ if (function_select == 0){ int *fire_count = new int[main_neuron_num]; string output_file_name = "MNIST_labeled_data.csv"; }else if (function_select == 1){ read_by_type(input_array_1, input_array_2, output_array_1, function_select_2, size, output_array_2); } }
94daf5b7dcf87af005a16c074a1984936207217c.cu
#include "header.h" #include <iostream> #include <string> #include <fstream> #include<stdlib.h> #include <stdio.h> #include<time.h> #include<device_functions.h> #include<cuda.h> #include<math.h> using namespace std; void find_max(){ } void read_by_type(float *mnist_img, int *mnist_label, float *output_array, int type, int total_num, int *result_size){ int count = 0; int total_pixel = 28*28; for(int i=0;i<total_num;i++){ if (mnist_label[i]==type){ for(int j=0;j<total_pixel;j++){ output_array[count*total_pixel+j] = mnist_img[i*total_pixel+j]; } count ++; } } result_size[0] = count; printf("number_of_%d_img_is:%d\n",type, count); } void MNIST_labeling(string input_file_starter, int size, float *input_array_1, int *input_array_2, float *output_array_1, int *output_array_2, int main_neuron_num, int function_select, int function_select_2){ if (function_select == 0){ int *fire_count = new int[main_neuron_num]; string output_file_name = "MNIST_labeled_data.csv"; }else if (function_select == 1){ read_by_type(input_array_1, input_array_2, output_array_1, function_select_2, size, output_array_2); } }
d8564cf9c6d02563f8e318928dbb1e076534331c.hip
// !!! This is a file automatically generated by hipify!!! #ifndef __HIPCC__ #define __HIPCC__ #endif #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/hip_runtime.h> #include <hip/device_functions.h> #include <hip/hip_runtime_api.h> #include <stdio.h> __global__ void mapOverlap1D(int * in, int * out, int ray, int size) { __shared__ int tmp[9]; int index = blockIdx.x * blockDim.x + threadIdx.x; int sIndex = threadIdx.x + ray; if (threadIdx.x < ray) { if (index - ray > 0){ tmp[sIndex - ray] = in[index - ray]; } else{ tmp[sIndex - ray] = in[size - ray + threadIdx.x]; } if (index + blockDim.x >= size) tmp[sIndex + blockDim.x - ray] = in[threadIdx.x]; else tmp[sIndex + blockDim.x - ray] = in[index + blockDim.x]; } tmp[sIndex] = in[index]; __syncthreads(); int result = 0; for (int i=-ray; i <= ray; i++) { result += tmp[sIndex + i]; } out[index] = result; } int main() { int * host_in = NULL; int * host_out = NULL; int * kernel_in = NULL; int * kernel_out = NULL; int ray = 2; int N = 10; host_in = (int*)malloc(N*sizeof(int)); host_out = (int*)malloc(N*sizeof(int)); hipMalloc(&kernel_in, N*sizeof(int)); hipMalloc(&kernel_out, N*sizeof(int)); for (int i = 0; i < N; i++) { host_in[i] = 1+i; } hipMemcpy(kernel_in, host_in, N*sizeof(int), hipMemcpyHostToDevice); mapOverlap1D << < 2, 5 >> > (kernel_in, kernel_out, ray, 10); hipMemcpy(host_out, kernel_out, N*sizeof(int), hipMemcpyDeviceToHost); int res = 0; for (int i = 0; i < N; i++) { printf("%i ", host_out[i]); res += host_out[i]; } printf("\n"); for (int i = 0; i < N; i++) { printf("%i ", host_in[i]); res += host_in[i]; } printf("\n"); printf("Res = %i\n", res); system("PAUSE"); return 0; }
d8564cf9c6d02563f8e318928dbb1e076534331c.cu
#ifndef __CUDACC__ #define __CUDACC__ #endif #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cuda.h> #include <device_functions.h> #include <cuda_runtime_api.h> #include <stdio.h> __global__ void mapOverlap1D(int * in, int * out, int ray, int size) { __shared__ int tmp[9]; int index = blockIdx.x * blockDim.x + threadIdx.x; int sIndex = threadIdx.x + ray; if (threadIdx.x < ray) { if (index - ray > 0){ tmp[sIndex - ray] = in[index - ray]; } else{ tmp[sIndex - ray] = in[size - ray + threadIdx.x]; } if (index + blockDim.x >= size) tmp[sIndex + blockDim.x - ray] = in[threadIdx.x]; else tmp[sIndex + blockDim.x - ray] = in[index + blockDim.x]; } tmp[sIndex] = in[index]; __syncthreads(); int result = 0; for (int i=-ray; i <= ray; i++) { result += tmp[sIndex + i]; } out[index] = result; } int main() { int * host_in = NULL; int * host_out = NULL; int * kernel_in = NULL; int * kernel_out = NULL; int ray = 2; int N = 10; host_in = (int*)malloc(N*sizeof(int)); host_out = (int*)malloc(N*sizeof(int)); cudaMalloc(&kernel_in, N*sizeof(int)); cudaMalloc(&kernel_out, N*sizeof(int)); for (int i = 0; i < N; i++) { host_in[i] = 1+i; } cudaMemcpy(kernel_in, host_in, N*sizeof(int), cudaMemcpyHostToDevice); mapOverlap1D << < 2, 5 >> > (kernel_in, kernel_out, ray, 10); cudaMemcpy(host_out, kernel_out, N*sizeof(int), cudaMemcpyDeviceToHost); int res = 0; for (int i = 0; i < N; i++) { printf("%i ", host_out[i]); res += host_out[i]; } printf("\n"); for (int i = 0; i < N; i++) { printf("%i ", host_in[i]); res += host_in[i]; } printf("\n"); printf("Res = %i\n", res); system("PAUSE"); return 0; }
656c8cc0f6d685aeb743f76dcae3acad3f143921.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "FloorKernel_naive.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *A = NULL; hipMalloc(&A, XSIZE*YSIZE); int Acount = 1; int Acols = 1; float *out0 = NULL; hipMalloc(&out0, XSIZE*YSIZE); int out0count = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( FloorKernel_naive), dim3(gridBlock),dim3(threadBlock), 0, 0, A,Acount,Acols,out0,out0count); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( FloorKernel_naive), dim3(gridBlock),dim3(threadBlock), 0, 0, A,Acount,Acols,out0,out0count); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( FloorKernel_naive), dim3(gridBlock),dim3(threadBlock), 0, 0, A,Acount,Acols,out0,out0count); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
656c8cc0f6d685aeb743f76dcae3acad3f143921.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "FloorKernel_naive.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); int Acount = 1; int Acols = 1; float *out0 = NULL; cudaMalloc(&out0, XSIZE*YSIZE); int out0count = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); FloorKernel_naive<<<gridBlock,threadBlock>>>(A,Acount,Acols,out0,out0count); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { FloorKernel_naive<<<gridBlock,threadBlock>>>(A,Acount,Acols,out0,out0count); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { FloorKernel_naive<<<gridBlock,threadBlock>>>(A,Acount,Acols,out0,out0count); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
77fc8ff81cd4b400b4d81c332c2b713f65cef6f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <intrin.h> #include <ctime> #include <chrono> #include <ratio> #pragma comment(lib, "cudart") #define N 4096 #define THREADS_PER_BLOCK 1024 __global__ void reduceBase(int *g_idata, int *g_odata) { extern __shared__ int sdata[]; // each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = g_idata[i]; __syncthreads(); // do reduction in shared mem for (unsigned int s = 1; s < blockDim.x; s *= 2) { int index = 2 * s * tid; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } //__global__ void reduceBase(int *g_idata, int *g_odata) { // extern __shared__ int sdata[]; // // each thread loads one element from global to shared mem // unsigned int tid = threadIdx.x; // unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; // sdata[tid] = g_idata[i]; // __syncthreads(); // // do reduction in shared mem // for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { // if (tid < s) { // sdata[tid] += sdata[tid + s]; // } // __syncthreads(); // } // // write result for this block to global mem // if (tid == 0) g_odata[blockIdx.x] = sdata[0]; //} int main() { int sumCPU = 0; int *a, *c; // host copies of a, c int *d_a, *d_c; // device copies of a, c size_t size = N * sizeof(int); // Alloc space for device copies of a, c hipMalloc(&d_a, size); hipMalloc(&d_c, size); // Alloc space for host copies of a, c a = (int*)malloc(size); c = (int*)malloc(size); // Random input array init srand((time(NULL))); for (int i = 0; i < N; i++) { a[i] = rand() % 100000; } // Compute on CPU using namespace std::chrono; high_resolution_clock::time_point t1 = high_resolution_clock::now(); for (int i = 0; i < N; i++) { sumCPU += a[i]; } high_resolution_clock::time_point t2 = high_resolution_clock::now(); duration<double> time_span = duration_cast<duration<double>>(t2 - t1); std::cout << "CPU time: " << time_span.count() << " seconds." << "\n"; hipEvent_t start, stop; // hipEventCreate(&start); // hipEventCreate(&stop); // hipEventRecord(start, 0); // Copy input to device hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); //int blocks = N / THREADS_PER_BLOCK; //int toDo = 0; //int maxThreads = THREADS_PER_BLOCK; //threads per block //int threads = (N < maxThreads) ? N : maxThreads; //if (blocks > 1) toDo = 1 + blocks / maxThreads; //else toDo = 0; //for (int i = 0; i < toDo; i++) { // threads = (blocks < maxThreads) ? 
blocks : maxThreads; // blocks = blocks / threads; // dim3 dimBlock(threads, 1, 1); // dim3 dimGrid(blocks, 1, 1); // Launch reduceBase() kernel on GPU // reduceBase << < dimGrid, dimBlock, THREADS_PER_BLOCK * sizeof(int) >> > (d_a, d_c); //reduceBase << <N / THREADS_PER_BLOCK, THREADS_PER_BLOCK, THREADS_PER_BLOCK * sizeof(int) >> > (d_a, d_c); //} // Launch the kernel and copy the per-block partial sums back to host reduceBase << <N / THREADS_PER_BLOCK, THREADS_PER_BLOCK, THREADS_PER_BLOCK * sizeof(int) >> > (d_a, d_c); hipMemcpy(c, d_c, N*sizeof(int), hipMemcpyDeviceToHost); // end of the timing measurement hipEventRecord(stop, 0); // wait for the GPU to finish its work hipEventSynchronize(stop); float elapsedTime; // elapsedTime - elapsed time in milliseconds hipEventElapsedTime(&elapsedTime, start, stop); // destroy the event objects hipEventDestroy(start); hipEventDestroy(stop); // hipEventElapsedTime reports milliseconds, so convert to seconds printf("GPU time: %lf seconds\n", (double)(elapsedTime) / 1000.0); // Sum the per-block partial sums (N / THREADS_PER_BLOCK of them) to get the GPU result int sumGPU = 0; for (int i = 0; i < N / THREADS_PER_BLOCK; i++) sumGPU += c[i]; // Print results std::cout << "\nGPU result: " << sumGPU << "\n" << "CPU result: " << sumCPU << "\n"; // Cleanup free(a); free(c); hipFree(d_a); hipFree(d_c); return 0; }
77fc8ff81cd4b400b4d81c332c2b713f65cef6f0.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <cuda_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <intrin.h> #include <ctime> #include <chrono> #include <ratio> #pragma comment(lib, "cudart") #define N 4096 #define THREADS_PER_BLOCK 1024 __global__ void reduceBase(int *g_idata, int *g_odata) { extern __shared__ int sdata[]; // each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = g_idata[i]; __syncthreads(); // do reduction in shared mem for (unsigned int s = 1; s < blockDim.x; s *= 2) { int index = 2 * s * tid; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } //__global__ void reduceBase(int *g_idata, int *g_odata) { // extern __shared__ int sdata[]; // // each thread loads one element from global to shared mem // unsigned int tid = threadIdx.x; // unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; // sdata[tid] = g_idata[i]; // __syncthreads(); // // do reduction in shared mem // for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { // if (tid < s) { // sdata[tid] += sdata[tid + s]; // } // __syncthreads(); // } // // write result for this block to global mem // if (tid == 0) g_odata[blockIdx.x] = sdata[0]; //} int main() { int sumCPU = 0; int *a, *c; // host copies of a, c int *d_a, *d_c; // device copies of a, c size_t size = N * sizeof(int); // Alloc space for device copies of a, c cudaMalloc(&d_a, size); cudaMalloc(&d_c, size); // Alloc space for host copies of a, c a = (int*)malloc(size); c = (int*)malloc(size); // Random input array init srand((time(NULL))); for (int i = 0; i < N; i++) { a[i] = rand() % 100000; } // Compute on CPU using namespace std::chrono; high_resolution_clock::time_point t1 = high_resolution_clock::now(); for (int i = 0; i < N; i++) { sumCPU += a[i]; } high_resolution_clock::time_point t2 = high_resolution_clock::now(); duration<double> time_span = duration_cast<duration<double>>(t2 - t1); std::cout << "CPU time: " << time_span.count() << " seconds." << "\n"; cudaEvent_t start, stop; // создание события для точки старта cudaEventCreate(&start); // создание события для точки завершения cudaEventCreate(&stop); // точка начала замера времени cudaEventRecord(start, 0); // Copy input to device cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); //int blocks = N / THREADS_PER_BLOCK; //int toDo = 0; //int maxThreads = THREADS_PER_BLOCK; //threads per block //int threads = (N < maxThreads) ? N : maxThreads; //if (blocks > 1) toDo = 1 + blocks / maxThreads; //else toDo = 0; //for (int i = 0; i < toDo; i++) { // threads = (blocks < maxThreads) ? 
blocks : maxThreads; // blocks = blocks / threads; // dim3 dimBlock(threads, 1, 1); // dim3 dimGrid(blocks, 1, 1); // Launch reduceBase() kernel on GPU // reduceBase << < dimGrid, dimBlock, THREADS_PER_BLOCK * sizeof(int) >> > (d_a, d_c); //reduceBase << <N / THREADS_PER_BLOCK, THREADS_PER_BLOCK, THREADS_PER_BLOCK * sizeof(int) >> > (d_a, d_c); //} // Copy result back to host reduceBase << <N / THREADS_PER_BLOCK, THREADS_PER_BLOCK, THREADS_PER_BLOCK * sizeof(int) >> > (d_a, d_c); cudaMemcpy(c, d_c, N*sizeof(int), cudaMemcpyDeviceToHost); // точка завершения замера времени cudaEventRecord(stop, 0); // ожидание завершения выполнения задач на GPU cudaEventSynchronize(stop); float elapsedTime; // elapsedTime - затраченное время в миллисекундах cudaEventElapsedTime(&elapsedTime, start, stop); // уничтожение объектов событий cudaEventDestroy(start); cudaEventDestroy(stop); printf("GPU time: %lf seconds\n", (double)(elapsedTime) / CLOCKS_PER_SEC); // Print results std::cout << "\nGPU result: " << c[0]+c[1] << "\n" << "CPU result: " << sumCPU << "\n"; // Cleanup free(a); free(c); cudaFree(d_a); cudaFree(d_c); return 0; }
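// Editor's note (not part of the original file): instead of summing the per-block
// partial results on the host, the reduction can be finished on the device with a
// second launch of the same kernel. A minimal sketch, assuming d_c already holds the
// N / THREADS_PER_BLOCK partial sums produced above:
//
//   const int numBlocks = N / THREADS_PER_BLOCK;   // 4 partial sums with N = 4096
//   // one block of numBlocks threads reduces the partial sums in place
//   reduceBase << <1, numBlocks, numBlocks * sizeof(int) >> > (d_c, d_c);
//   int sumGPU2 = 0;
//   cudaMemcpy(&sumGPU2, d_c, sizeof(int), cudaMemcpyDeviceToHost);
//
// This works because reduceBase assumes blockDim.x is a power of two (which 4 is) and
// writes each block's result to g_odata[blockIdx.x], i.e. d_c[0] for a single block.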
4e4a525aee5d7264e2190f86fe7a8d0ed2fe1a53.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include "matrix_hip.cuh"
using namespace std;

void samplePatches(int patchSize, int numPatches, int dims[], int numRecords, char in_name[], char out_name[]) {
	int patchesPerRecord = numPatches / numRecords;
	ifstream in;
	in.open(in_name, std::ifstream::in | std::ifstream::binary);
	if (in.fail()) {
		printf("data file open failed!\n");
		exit(-1);
	}
	ofstream out;
	out.open(out_name, std::ofstream::out | std::ofstream::binary);
	if (out.fail()) {
		printf("creating output file failed!\n");
		exit(-1);
	}
	int dimall = dims[0]*dims[1]*dims[2];
	int dim2 = dims[0]*dims[1];
	MTYPE* data = (MTYPE*) malloc(dimall*sizeof(MTYPE));
	for (int i = 0; i < numRecords; i++) {
		// data holds only the current record; it is re-read on every iteration
		in.read((char*)data, dimall*sizeof(MTYPE));
		for (int j = 0; j < patchesPerRecord; j++) {
			// data is row-major: pixels->channels->images
			int pixelX = rand() % (dims[0] - patchSize + 1);
			int pixelY = rand() % (dims[1] - patchSize + 1);
			for (int c = 0; c < dims[2]; c++)
				for (int y = 0; y < patchSize; y++)
					for (int x = 0; x < patchSize; x++)
						// index within the current record only; the former i*dimall offset
						// read past the single-record buffer allocated above
						out.write((char*)(data + c*dim2 + (pixelY+y)*dims[0] + pixelX + x), sizeof(MTYPE));
		}
	}
	free(data);
	in.close();
	out.close();
}

int main() {
	char* in_name = "/scratch0/qwang37/cifar-10-batches-bin/cifar_normalized.bin";
	char* out_name = "/scratch0/qwang37/cifar-10-batches-bin/cifar_patches.bin";
	int dims[3] = {32, 32, 3};
	samplePatches(5, 2000000, dims, 50000, in_name, out_name);
	printf("patch sampling successfully finished!\n");
}
4e4a525aee5d7264e2190f86fe7a8d0ed2fe1a53.cu
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include "matrix.cuh"
using namespace std;

void samplePatches(int patchSize, int numPatches, int dims[], int numRecords, char in_name[], char out_name[]) {
	int patchesPerRecord = numPatches / numRecords;
	ifstream in;
	in.open(in_name, std::ifstream::in | std::ifstream::binary);
	if (in.fail()) {
		printf("data file open failed!\n");
		exit(-1);
	}
	ofstream out;
	out.open(out_name, std::ofstream::out | std::ofstream::binary);
	if (out.fail()) {
		printf("creating output file failed!\n");
		exit(-1);
	}
	int dimall = dims[0]*dims[1]*dims[2];
	int dim2 = dims[0]*dims[1];
	MTYPE* data = (MTYPE*) malloc(dimall*sizeof(MTYPE));
	for (int i = 0; i < numRecords; i++) {
		// data holds only the current record; it is re-read on every iteration
		in.read((char*)data, dimall*sizeof(MTYPE));
		for (int j = 0; j < patchesPerRecord; j++) {
			// data is row-major: pixels->channels->images
			int pixelX = rand() % (dims[0] - patchSize + 1);
			int pixelY = rand() % (dims[1] - patchSize + 1);
			for (int c = 0; c < dims[2]; c++)
				for (int y = 0; y < patchSize; y++)
					for (int x = 0; x < patchSize; x++)
						// index within the current record only; the former i*dimall offset
						// read past the single-record buffer allocated above
						out.write((char*)(data + c*dim2 + (pixelY+y)*dims[0] + pixelX + x), sizeof(MTYPE));
		}
	}
	free(data);
	in.close();
	out.close();
}

int main() {
	char* in_name = "/scratch0/qwang37/cifar-10-batches-bin/cifar_normalized.bin";
	char* out_name = "/scratch0/qwang37/cifar-10-batches-bin/cifar_patches.bin";
	int dims[3] = {32, 32, 3};
	samplePatches(5, 2000000, dims, 50000, in_name, out_name);
	printf("patch sampling successfully finished!\n");
}
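// Editor's note (not part of the original file): each patch is written as
// dims[2] * patchSize * patchSize consecutive MTYPE values, channel-major
// ([channel][row][col] within the patch). A minimal read-back sketch for some
// patch index k and in-patch coordinates (c, y, x), assuming MTYPE is float
// (its actual definition lives in matrix.cuh, which is not shown here):
//
//   const int patchVals = 3 * 5 * 5;   // channels * patchSize * patchSize, as in main()
//   std::vector<float> patch(patchVals);
//   std::ifstream pin("cifar_patches.bin", std::ios::binary);
//   pin.seekg((std::streamoff)k * patchVals * sizeof(float));
//   pin.read((char*)patch.data(), patchVals * sizeof(float));
//   float v = patch[c*5*5 + y*5 + x];  // value of channel c, row y, col x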
1fd4839eaa3338afb0c611874bb0582e379b4e69.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utilities.cuh" #include "functions.h" #define FULL_MASK 0xffffffff /*! \file utilities.cu defines kernel callers and kernels for some simple GPU array calculations \addtogroup utilityKernels @{ */ // Utility class used to avoid linker errors with extern // unsized shared memory arrays with templated type template<class T> struct SharedMemory { __device__ inline operator T *() { extern __shared__ int __smem[]; return (T *)__smem; } __device__ inline operator const T *() const { extern __shared__ int __smem[]; return (T *)__smem; } }; // specialize for double to avoid unaligned memory // access compile errors template<> struct SharedMemory<double> { __device__ inline operator double *() { extern __shared__ double __smem_d[]; return (double *)__smem_d; } __device__ inline operator const double *() const { extern __shared__ double __smem_d[]; return (double *)__smem_d; } }; /* This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ template <class T, unsigned int blockSize> __global__ void reduce6(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; T mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). 
More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (i + blockSize < n) mySum += g_idata[i+blockSize]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if ((blockSize >= 512) && (tid < 256)) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); if ((blockSize >= 256) &&(tid < 128)) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); if ((blockSize >= 128) && (tid < 64)) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); #if (__CUDA_ARCH__ >= 300 ) if ( tid < 32 ) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) mySum += sdata[tid + 32]; // Reduce final warp using shuffle for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down_sync(FULL_MASK,mySum, offset); } } #else // fully unroll reduction within a single warp if ((blockSize >= 64) && (tid < 32)) { sdata[tid] = mySum = mySum + sdata[tid + 32]; } __syncthreads(); if ((blockSize >= 32) && (tid < 16)) { sdata[tid] = mySum = mySum + sdata[tid + 16]; } __syncthreads(); if ((blockSize >= 16) && (tid < 8)) { sdata[tid] = mySum = mySum + sdata[tid + 8]; } __syncthreads(); if ((blockSize >= 8) && (tid < 4)) { sdata[tid] = mySum = mySum + sdata[tid + 4]; } __syncthreads(); if ((blockSize >= 4) && (tid < 2)) { sdata[tid] = mySum = mySum + sdata[tid + 2]; } __syncthreads(); if ((blockSize >= 2) && ( tid < 1)) { sdata[tid] = mySum = mySum + sdata[tid + 1]; } __syncthreads(); #endif // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = mySum; } template <class T> void reduce(int size, int threads, int blocks, T *d_idata, T *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 
2 * threads * sizeof(T) : threads * sizeof(T); switch (threads) { case 512: hipLaunchKernelGGL(( reduce6<T, 512>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 256: hipLaunchKernelGGL(( reduce6<T, 256>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( reduce6<T, 128>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 64: hipLaunchKernelGGL(( reduce6<T, 64>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 32: hipLaunchKernelGGL(( reduce6<T, 32>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 16: hipLaunchKernelGGL(( reduce6<T,16>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 8: hipLaunchKernelGGL(( reduce6<T,8>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 4: hipLaunchKernelGGL(( reduce6<T,4>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 2: hipLaunchKernelGGL(( reduce6<T,2>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 1: hipLaunchKernelGGL(( reduce6<T,1>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; } } //////////////////////////////////////////////////////////////////////////////// // This function performs a reduction of the input data multiple times and // measures the average reduction time. //////////////////////////////////////////////////////////////////////////////// template <class T> T gpuReduction(int n, int numThreads, int numBlocks, int maxThreads, int maxBlocks, T *d_idata, T *d_odata) { T gpu_result = 0; bool needReadBack = true; int cpuFinalThreshold = 1; gpu_result = 0; // execute the kernel reduce<T>(n, numThreads, numBlocks, d_idata, d_odata); HANDLE_ERROR(hipGetLastError()); //T *h_odata = (T *) malloc(numBlocks*sizeof(T)); // sum partial block sums on GPU int s=numBlocks; while (s > cpuFinalThreshold) { int threads = 0, blocks = 0; getNumBlocksAndThreads(s, maxBlocks, maxThreads, blocks, threads); reduce<T>(s, threads, blocks, d_odata, d_odata); s = (s + (threads*2-1)) / (threads*2); } /* if (s > 1) { // copy result from device to host hipMemcpy(h_odata, d_odata, s * sizeof(T), hipMemcpyDeviceToHost); for (int i=0; i < s; i++) { gpu_result += h_odata[i]; } needReadBack = false; } */ // copy final sum from device to host if (needReadBack) hipMemcpy(&gpu_result, d_odata, sizeof(T), hipMemcpyDeviceToHost); HANDLE_ERROR(hipGetLastError()); //free(h_odata); return gpu_result; } /*! add the first N elements of array and put it in output[helperIdx] */ __global__ void gpu_serial_reduction_kernel(scalar *array, scalar *output, int helperIdx,int N) { scalar ans = 0.0; for (int i = 0; i < N; ++i) ans += array[i]; output[helperIdx] = ans; return; }; /*! add the first N elements of array and put it in output[helperIdx]...use shared memory a bit */ __global__ void gpu_serial_reduction_kernel2(scalar *array, scalar *output, int helperIdx,int N) { int tidx = threadIdx.x; extern __shared__ scalar partialSum[]; partialSum[tidx] = 0.0; __syncthreads(); int max = N/ blockDim.x+1; for (int i = 0; i < max;++i) { int pos = blockDim.x *i+tidx; if(pos > N) continue; partialSum[tidx] += array[pos]; } __syncthreads(); if(tidx ==0) { scalar ans =0.0; for (int i = 0; i < blockDim.x; ++i) ans += partialSum[i]; output[helperIdx] = ans; } return; }; /*! 
perform a block reduction, storing the partial sums of input into output */ __global__ void gpu_parallel_block_reduction_kernel(scalar *input, scalar *output,int N) { extern __shared__ scalar sharedArray[]; unsigned int tidx = threadIdx.x; unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; //load into shared memory and synchronize if(i < N) sharedArray[tidx] = input[i]; else sharedArray[tidx] = 0.0; __syncthreads(); //reduce for (int s = blockDim.x/2; s>0; s>>=1) { if (tidx < s) sharedArray[tidx] += sharedArray[tidx+s]; __syncthreads(); }; //write to the correct block of the output array if (tidx==0) output[blockIdx.x] = sharedArray[0]; }; /*! a slight optimization of the previous block reduction, c.f. M. Harris presentation */ __global__ void gpu_parallel_block_reduction2_kernel(scalar *input, scalar *output,int N) { extern __shared__ scalar sharedArray[]; unsigned int tidx = threadIdx.x; unsigned int i = 2*blockDim.x * blockIdx.x + threadIdx.x; scalar sum; //load into shared memory and synchronize if(i < N) sum = input[i]; else sum = 0.0; if(i + blockDim.x < N) sum += input[i+blockDim.x]; sharedArray[tidx] = sum; __syncthreads(); //reduce for (int s = blockDim.x/2; s>0; s>>=1) { if (tidx < s) sharedArray[tidx] = sum = sum+sharedArray[tidx+s]; __syncthreads(); }; //write to the correct block of the output array if (tidx==0) output[blockIdx.x] = sum; }; /*! multiple loads and loop unrolling... a slight optimization of the previous block reduction, c.f. M. Harris presentation */ __global__ void gpu_parallel_block_reduction3_kernel(scalar *input, scalar *output,int N) { extern __shared__ scalar sharedArray[]; unsigned int tidx = threadIdx.x; unsigned int i = 2*blockDim.x * blockIdx.x + threadIdx.x; if(i+blockDim.x < N) sharedArray[tidx] = input[i]+input[i+blockDim.x]; else if(i < N) sharedArray[tidx] = input[i]; else sharedArray[tidx] = 0.0; __syncthreads(); //reduce for (int stride = blockDim.x/2;stride >32; stride >>=1) { if(tidx<stride) sharedArray[tidx] += sharedArray[tidx+stride]; __syncthreads(); } if(tidx < 32) { sharedArray[tidx] += sharedArray[tidx+32]; sharedArray[tidx] += sharedArray[tidx+16]; sharedArray[tidx] += sharedArray[tidx+8]; sharedArray[tidx] += sharedArray[tidx+4]; sharedArray[tidx] += sharedArray[tidx+2]; sharedArray[tidx] += sharedArray[tidx+1]; } //write to the correct block of the output array if (tidx==0) output[blockIdx.x] = sharedArray[0]; }; /*! Store the dot product of two dVecs in a scalar vec */ __global__ void gpu_vec_dot_product_kernel(dVec *input1, dVec *input2, scalar *output,int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; output[idx] = dot(input1[idx],input2[idx]); return; }; /*! Store the dot product of two 5-component covectors in a scalar vector */ __global__ void gpu_vec_covectorDotProduct_kernel(dVec *input, scalar *output,int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; output[idx] = (4./3.)*input[idx][0]*input[idx][0] + input[idx][1]*input[idx][1] + input[idx][2]*input[idx][2] + (4./3.)*input[idx][3]*input[idx][3] + input[idx][4]*input[idx][4] - (4./3.)*input[idx][0]*input[idx][3]; return; } /*! 
Store the dot product of two 5-component vectors in a scalar vector */ __global__ void gpu_vec_vectorDotProduct_kernel(dVec *input1, dVec *input2, scalar *output,int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; output[idx] = input1[idx][0]*input2[idx][0] + input1[idx][1]*input2[idx][1] + input1[idx][2]*input2[idx][2] + input1[idx][3]*input2[idx][3] + input1[idx][4]*input2[idx][4] + input1[idx][0]*input2[idx][3]; return; } /*! Store the dot product of two 5-component vectors in a scalar vector */ __global__ void gpu_vec_vectorDotProduct_kernel(dVec *input, scalar *output,int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; output[idx] = input[idx][0]*input[idx][0] + input[idx][1]*input[idx][1] + input[idx][2]*input[idx][2] + input[idx][3]*input[idx][3] + input[idx][4]*input[idx][4] + input[idx][0]*input[idx][3]; return; } /*! Store the dot product of two dVecs in a scalar vec, unrolled by dimension */ __global__ void gpu_vec_dot_product_unrolled_kernel(dVec *input1, dVec *input2, scalar *output,int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; int p1 = idx / DIMENSION; int d1 = idx % DIMENSION; if (p1 >= N) return; output[idx] = input1[p1][d1]*input2[p1][d1]; return; }; /*! This kernel basically performs the operation of the "reduction2" kernel, but the shared memory gets dot products...BROKEN */ __global__ void gpu_dVec_dot_products_kernel(dVec *input1, dVec *input2, scalar *output,int N) { extern __shared__ scalar sharedArray[]; unsigned int tidx = threadIdx.x; unsigned int i = 2*blockDim.x * blockIdx.x + threadIdx.x; scalar tempSum; if(i < N) tempSum = dot(input1[i],input2[i]); else tempSum = 0.0; sharedArray[tidx] = 0.0; __syncthreads(); //reduce for (int s = blockDim.x/2;s>0; s>>=1) { if (tidx <s) sharedArray[tidx] = tempSum = tempSum+sharedArray[tidx+s]; __syncthreads(); }; //write to the correct block of the output array if(tidx==0) output[blockIdx.x] = tempSum; }; /*! This kernel basically performs the operation of the "reduction2" kernel, but the shared memory gets dot products */ __global__ void gpu_unrolled_dVec_dot_products_kernel(dVec *input1, dVec *input2, scalar *output,int N) { extern __shared__ scalar sharedArray[]; unsigned int tidx = threadIdx.x; unsigned int i = 2*blockDim.x * blockIdx.x + threadIdx.x; int p1 = i / DIMENSION; int d1 = i % DIMENSION; int p2 = (i+blockDim.x) / DIMENSION; int d2 = (i+blockDim.x) % DIMENSION; if(i+blockDim.x < N) sharedArray[tidx] = input1[p1][d1]*input2[p1][d1] + input1[p2][d2]*input2[p2][d2]; else if(i < N) sharedArray[tidx] = input1[p1][d1]*input2[p1][d1]; else sharedArray[tidx] = 0.0; __syncthreads(); //reduce for (int stride = blockDim.x/2;stride >32; stride >>=1) { if(tidx<stride) sharedArray[tidx] += sharedArray[tidx+stride]; __syncthreads(); } if(tidx < 32) { sharedArray[tidx] += sharedArray[tidx+32]; sharedArray[tidx] += sharedArray[tidx+16]; sharedArray[tidx] += sharedArray[tidx+8]; sharedArray[tidx] += sharedArray[tidx+4]; sharedArray[tidx] += sharedArray[tidx+2]; sharedArray[tidx] += sharedArray[tidx+1]; } //write to the correct block of the output array if (tidx==0) output[blockIdx.x] = sharedArray[0]; }; /*! 
take a vector of dVecs, a vector of scalars, a factor, and return a vector where every entry is factor*scalar[i]*(dVec[i])^2 */ __global__ void gpu_scalar_times_dVec_squared_kernel(dVec *d_vec1, scalar *d_scalars, scalar factor, scalar *d_ans, int n) { // read in the index that belongs to this thread unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= n) return; d_ans[idx] = factor * d_scalars[idx]*dot(d_vec1[idx],d_vec1[idx]); }; /*! take two vectors of dVecs and return a vector of scalars, where each entry is vec1[i].vec2[i] */ __global__ void gpu_dot_dVec_vectors_kernel(dVec *d_vec1, dVec *d_vec2, scalar *d_ans, int n) { // read in the index that belongs to this thread unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= n) return; d_ans[idx] = dot(d_vec1[idx],d_vec2[idx]); }; /*! multiply every element of an array of dVecs by the same scalar */ __global__ void gpu_dVec_times_scalar_kernel(dVec *d_vec1,scalar factor, int n) { // read in the index that belongs to this thread unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= n) return; d_vec1[idx] = factor*d_vec1[idx]; }; /*! multiply every element of an array of dVecs by the same scalar */ __global__ void gpu_dVec_times_scalar_kernel(dVec *d_vec1,scalar factor, dVec *d_ans,int n) { // read in the index that belongs to this thread unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= n) return; d_ans[idx] = factor*d_vec1[idx]; }; __global__ void gpu_dVec_plusEqual_dVec_kernel(dVec *d_vec1,dVec *d_vec2,scalar factor,int n) { // read in the index that belongs to this thread unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= n) return; int pIdx = idx / DIMENSION; int dIdx = idx % DIMENSION; d_vec1[pIdx][dIdx] += factor*d_vec2[pIdx][dIdx]; }; ///// //Kernel callers /// bool gpu_dVec_plusEqual_dVec(dVec *d_vec1, dVec *d_vec2, scalar factor, int N, int maxBlockSize) { unsigned int block_size = maxBlockSize; if (N < 128) block_size = 32; unsigned int nblocks = (DIMENSION*N)/block_size + 1; hipLaunchKernelGGL(( gpu_dVec_plusEqual_dVec_kernel), dim3(nblocks),dim3(block_size), 0, 0, d_vec1,d_vec2,factor,DIMENSION*N); HANDLE_ERROR(hipGetLastError()); return hipSuccess; }; /*! \param d_vec1 dVec input array \param factor scalar multiplication factor \param N the length of the arrays \post d_vec1 *= factor for every element */ bool gpu_dVec_times_scalar(dVec *d_vec1, scalar factor, int N) { unsigned int block_size = 128; if (N < 128) block_size = 32; unsigned int nblocks = N/block_size + 1; hipLaunchKernelGGL(( gpu_dVec_times_scalar_kernel), dim3(nblocks),dim3(block_size), 0, 0, d_vec1, factor, N); HANDLE_ERROR(hipGetLastError()); return hipSuccess; }; bool gpu_dVec_times_scalar(dVec *d_vec1, scalar factor, dVec *d_ans,int N) { unsigned int block_size = 128; if (N < 128) block_size = 32; unsigned int nblocks = N/block_size + 1; hipLaunchKernelGGL(( gpu_dVec_times_scalar_kernel), dim3(nblocks),dim3(block_size), 0, 0, d_vec1, factor, d_ans, N); HANDLE_ERROR(hipGetLastError()); return hipSuccess; }; bool gpu_scalar_times_dVec_squared(dVec *d_vec1, scalar *d_scalars, scalar factor, scalar *d_ans, int N) { unsigned int block_size = 128; if (N < 128) block_size = 32; unsigned int nblocks = N/block_size + 1; hipLaunchKernelGGL(( gpu_scalar_times_dVec_squared_kernel), dim3(nblocks),dim3(block_size), 0, 0, d_vec1, d_scalars, factor, d_ans, N); HANDLE_ERROR(hipGetLastError()); return hipSuccess; }; /*! 
\param d_vec1 dVec input array \param d_vec2 dVec input array \param d_ans scalar output array... d_ans[idx] = d_vec1[idx].d_vec2[idx] \param N the length of the arrays \post d_ans = d_vec1.d_vec2 */ bool gpu_dot_dVec_vectors(dVec *d_vec1, dVec *d_vec2, scalar *d_ans, int N) { unsigned int block_size = 128; if (N < 128) block_size = 32; unsigned int nblocks = N/block_size + 1; hipLaunchKernelGGL(( gpu_dot_dVec_vectors_kernel), dim3(nblocks),dim3(block_size), 0, 0, d_vec1, d_vec2, d_ans, N); HANDLE_ERROR(hipGetLastError()); return hipSuccess; }; scalar gpu_gpuarray_QT_covector_dot_product( GPUArray<dVec> &input1, GPUArray<scalar> &intermediate, GPUArray<scalar> &intermediate2, int N, int block_size) { if (N == 0) N = input1.getNumElements(); unsigned int nblocks = N/block_size + 1; GPUArray<scalar> ans(1,false,false); if(intermediate.getNumElements() <N) intermediate.resize(N); if(intermediate2.getNumElements() <N) intermediate2.resize(N); scalar result = 0; //scope for array handles { ArrayHandle<dVec> i1(input1,access_location::device,access_mode::read); ArrayHandle<scalar> inter1(intermediate,access_location::device,access_mode::overwrite); hipLaunchKernelGGL(( gpu_vec_covectorDotProduct_kernel), dim3(nblocks),dim3(block_size), 0, 0, i1.data,inter1.data,N); HANDLE_ERROR(hipGetLastError()); int numBlocks = 0; int numThreads = 0; int maxBlocks = 64; int maxThreads = 256; ArrayHandle<scalar> inter2(intermediate2,access_location::device,access_mode::overwrite); getNumBlocksAndThreads(N, maxBlocks, maxThreads, numBlocks, numThreads); result = gpuReduction(N,numThreads,numBlocks,maxThreads,maxBlocks,inter1.data,inter2.data); return result; } }; scalar gpu_gpuarray_QT_vector_dot_product( GPUArray<dVec> &input1, GPUArray<dVec> &input2, GPUArray<scalar> &intermediate, GPUArray<scalar> &intermediate2, int N, int block_size) { if (N == 0) N = input1.getNumElements(); unsigned int nblocks = N/block_size + 1; GPUArray<scalar> ans(1,false,false); if(intermediate.getNumElements() <N) intermediate.resize(N); if(intermediate2.getNumElements() <N) intermediate2.resize(N); scalar result = 0; //scope for array handles { ArrayHandle<dVec> i1(input1,access_location::device,access_mode::read); ArrayHandle<dVec> i2(input2,access_location::device,access_mode::read); ArrayHandle<scalar> inter1(intermediate,access_location::device,access_mode::overwrite); hipLaunchKernelGGL(( gpu_vec_vectorDotProduct_kernel), dim3(nblocks),dim3(block_size), 0, 0, i1.data,i2.data,inter1.data,N); HANDLE_ERROR(hipGetLastError()); int numBlocks = 0; int numThreads = 0; int maxBlocks = 64; int maxThreads = 256; ArrayHandle<scalar> inter2(intermediate2,access_location::device,access_mode::overwrite); getNumBlocksAndThreads(N, maxBlocks, maxThreads, numBlocks, numThreads); result = gpuReduction(N,numThreads,numBlocks,maxThreads,maxBlocks,inter1.data,inter2.data); return result; } }; scalar gpu_gpuarray_QT_vector_dot_product( GPUArray<dVec> &input1, GPUArray<scalar> &intermediate, GPUArray<scalar> &intermediate2, int N, int block_size) { if (N == 0) N = input1.getNumElements(); unsigned int nblocks = N/block_size + 1; GPUArray<scalar> ans(1,false,false); if(intermediate.getNumElements() <N) intermediate.resize(N); if(intermediate2.getNumElements() <N) intermediate2.resize(N); scalar result = 0; //scope for array handles { ArrayHandle<dVec> i1(input1,access_location::device,access_mode::read); ArrayHandle<scalar> inter1(intermediate,access_location::device,access_mode::overwrite); hipLaunchKernelGGL(( gpu_vec_vectorDotProduct_kernel), 
dim3(nblocks),dim3(block_size), 0, 0, i1.data,inter1.data,N); HANDLE_ERROR(hipGetLastError()); int numBlocks = 0; int numThreads = 0; int maxBlocks = 64; int maxThreads = 256; ArrayHandle<scalar> inter2(intermediate2,access_location::device,access_mode::overwrite); getNumBlocksAndThreads(N, maxBlocks, maxThreads, numBlocks, numThreads); result = gpuReduction(N,numThreads,numBlocks,maxThreads,maxBlocks,inter1.data,inter2.data); return result; } }; scalar gpu_gpuarray_dVec_dot_products( GPUArray<dVec> &input1, GPUArray<dVec> &input2, GPUArray<scalar> &intermediate, GPUArray<scalar> &intermediate2, int N, int block_size) { if (N == 0) N = input1.getNumElements(); int Nd = DIMENSION*N; unsigned int nblocks = N/block_size + 1; GPUArray<scalar> ans(1,false,false); if(intermediate.getNumElements() <Nd) intermediate.resize(Nd); if(intermediate2.getNumElements() <Nd) intermediate2.resize(Nd); scalar result = 0; if(true) // for testing...switch to best reduction kernel { ArrayHandle<dVec> i1(input1,access_location::device,access_mode::read); ArrayHandle<dVec> i2(input2,access_location::device,access_mode::read); ArrayHandle<scalar> inter1(intermediate,access_location::device,access_mode::overwrite); ArrayHandle<scalar> inter2(intermediate2,access_location::device,access_mode::overwrite); hipLaunchKernelGGL(( gpu_vec_dot_product_unrolled_kernel), dim3(nblocks),dim3(block_size), 0, 0, i1.data,i2.data,inter1.data,N); HANDLE_ERROR(hipGetLastError()); int numBlocks = 0; int numThreads = 0; int maxBlocks = 64; int maxThreads = 256; getNumBlocksAndThreads(Nd, maxBlocks, maxThreads, numBlocks, numThreads); result = gpuReduction(Nd,numThreads,numBlocks,maxThreads,maxBlocks,inter1.data,inter2.data); return result; } else { { ArrayHandle<dVec> i1(input1,access_location::device,access_mode::read); ArrayHandle<dVec> i2(input2,access_location::device,access_mode::read); ArrayHandle<scalar> inter1(intermediate,access_location::device,access_mode::overwrite); ArrayHandle<scalar> inter2(intermediate2,access_location::device,access_mode::overwrite); ArrayHandle<scalar> answer(ans,access_location::device,access_mode::overwrite); gpu_dVec_dot_products(i1.data,i2.data,inter1.data,inter2.data,answer.data,0,N,block_size); } ArrayHandle<scalar> answer(ans,access_location::host,access_mode::read); return answer.data[0]; } } /*! takes the dot product of every element of the two input arrays and performs a reduction on the sum \param input1 vector 1...wow! \param input2 vector 2...wow! \param intermediate an array that input is dot producted to \param intermediate2 an array that input is block-reduced to \param output the intermediate array will be sum reduced and stored in one of the components of output \param helperIdx the location in output to store the answer \param N the size of the input and intermediate arrays \param block_size the...block size. 
doxygen is annoying sometimes */ bool gpu_dVec_dot_products(dVec *input1,dVec *input2, scalar *intermediate, scalar *intermediate2,scalar *output, int helperIdx, int N,int block_size) { //int problemSize = DIMENSION*N; //unsigned int nblocks = problemSize/block_size + 1; unsigned int nblocks = N/block_size + 1; //first dot the vectors together hipLaunchKernelGGL(( gpu_vec_dot_product_kernel), dim3(nblocks),dim3(block_size), 0, 0, input1,input2,intermediate,N); HANDLE_ERROR(hipGetLastError()); //then call the parallel reduction routine to sum up the answer gpu_parallel_reduction(intermediate,intermediate2,output,helperIdx,N,block_size); //gpu_serial_reduction(intermediate,output,helperIdx,N); HANDLE_ERROR(hipGetLastError()); return hipSuccess; /* HANDLE_ERROR(hipGetLastError()); //first do a block reduction of input unsigned int smem = block_size*sizeof(scalar); //Do a block reduction of the input array //gpu_unrolled_dVec_dot_products_kernel<<<nblocks,block_size,smem>>>(input1,input2,intermediate, problemSize); gpu_dVec_dot_products_kernel<<<nblocks,block_size,smem>>>(input1,input2,intermediate, N); HANDLE_ERROR(hipGetLastError()); //sum reduce the temporary array, saving the result in the right slot of the output array int nb=1024; if(nblocks < nb) nb = 1; hipLaunchKernelGGL(( gpu_serial_reduction_kernel2), dim3(1),dim3(nb),nb*sizeof(scalar), 0, intermediate,output,helperIdx,nblocks+1); HANDLE_ERROR(hipGetLastError()); */ } /* A stub of a function...eventually replace with off-the-shelf solution? */ bool gpu_dVec_dot_products(dVec *input1,dVec *input2, scalar *output, int helperIdx, int N) { //scalar init = 0.0; //dVecDotProduct mult_op; //thrust::plus<scalar> add_op; //thrust::device_ptr<scalar> ptrAns = thrust::device_pointer_cast(output); //thrust::device_ptr<dVec> ptr1 = thrust::device_pointer_cast(input1); //thrust::device_ptr<dVec> ptr2 = thrust::device_pointer_cast(input2); //output[helperIdx] = thrust::inner_product(thrust::device,ptr1,ptr1+N,ptr2,init,add_op,mult_op); //output[helperIdx] = thrust::inner_product(thrust::device,input1,input1+N,input2,init,add_op,mult_op); //ptrAns[helperIdx] = thrust::inner_product(thrust::device,input1,input1+N,input2,init,add_op,mult_op); HANDLE_ERROR(hipGetLastError()); return hipSuccess; }; /*! a two-step parallel reduction algorithm that first does a partial sum reduction of input into the intermediate array, then launches a second kernel to sum reduce intermediate into output[helperIdx] \param input the input array to sum \param intermediate an array that input is block-reduced to \param output the intermediate array will be sum reduced and stored in one of the components of output \param helperIdx the location in output to store the answer \param N the size of the input and intermediate arrays \param block_size the...block size. 
doxygen is annoying sometimes */ bool gpu_parallel_reduction(scalar *input, scalar *intermediate, scalar *output, int helperIdx, int N,int block_size) { unsigned int nblocks = N/block_size + 1; //first do a block reduction of input //Do a block reduction of the input array //reduce(N, block_size, nblocks,input, intermediate); unsigned int smem = block_size*sizeof(scalar); hipLaunchKernelGGL(( gpu_parallel_block_reduction2_kernel), dim3(nblocks),dim3(block_size),smem, 0, input,intermediate, N); HANDLE_ERROR(hipGetLastError()); //sum reduce the temporary array, saving the result in the right slot of the output array int nb=2048; if(nblocks < nb) nb = 1; hipLaunchKernelGGL(( gpu_serial_reduction_kernel2), dim3(1),dim3(nb),nb*sizeof(scalar), 0, intermediate,output,helperIdx,nblocks+1); HANDLE_ERROR(hipGetLastError()); return hipSuccess; }; /*! This serial reduction routine should probably never be called. It provides an interface to the gpu_serial_reduction_kernel above that may be useful for testing */ bool gpu_serial_reduction(scalar *array, scalar *output, int helperIdx, int N) { hipLaunchKernelGGL(( gpu_serial_reduction_kernel), dim3(1),dim3(1), 0, 0, array,output,helperIdx,N); HANDLE_ERROR(hipGetLastError()); return hipSuccess; }; /*! A function of convenience... set an array on the device */ template <typename T> __global__ void gpu_set_array_kernel(T *arr,T value, int N) { // read in the particle that belongs to this thread unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; arr[idx] = value; return; }; template<typename T> bool gpu_set_array(T *array, T value, int N,int maxBlockSize) { unsigned int block_size = maxBlockSize; if (N < 128) block_size = 16; unsigned int nblocks = N/block_size + 1; hipLaunchKernelGGL(( gpu_set_array_kernel), dim3(nblocks), dim3(block_size), 0, 0, array,value,N); HANDLE_ERROR(hipGetLastError()); return hipSuccess; } template <typename T> __global__ void gpu_copy_gpuarray_kernel(T *copyInto,T *copyFrom, int N) { // read in the particle that belongs to this thread unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; copyInto[idx] = copyFrom[idx]; return; }; template<typename T> bool gpu_copy_gpuarray(GPUArray<T> &copyInto,GPUArray<T> &copyFrom,int maxBlockSize) { int N = copyFrom.getNumElements(); if(copyInto.getNumElements() < N) copyInto.resize(N); unsigned int block_size = maxBlockSize; if (N < 128) block_size = 32; unsigned int nblocks = (N)/block_size + 1; ArrayHandle<T> ci(copyInto,access_location::device,access_mode::overwrite); ArrayHandle<T> cf(copyFrom,access_location::device,access_mode::read); hipLaunchKernelGGL(( gpu_copy_gpuarray_kernel), dim3(nblocks),dim3(block_size), 0, 0, ci.data,cf.data,N); HANDLE_ERROR(hipGetLastError()); return hipSuccess; } scalar host_dVec_dot_products(dVec *input1,dVec *input2,int N) { scalar ans = 0.0; for (int ii = 0; ii < N; ++ii) for (int dd = 0; dd < DIMENSION; ++dd) ans +=input1[ii][dd]*input2[ii][dd]; return ans; } void host_dVec_plusEqual_dVec(dVec *d_vec1,dVec *d_vec2,scalar factor,int N) { for (int ii = 0; ii < N; ++ii) d_vec1[ii] = d_vec1[ii] + factor*d_vec2[ii]; } void host_dVec_times_scalar(dVec *d_vec1, scalar factor, dVec *d_ans, int N) { for(int ii = 0; ii < N; ++ii) d_ans[ii] = factor*d_vec1[ii]; } //explicit template instantiations template scalar gpuReduction<scalar>(int n,int numThreads,int numBlocks,int maxThreads,int maxBlocks,scalar *d_idata,scalar *d_odata); template int gpuReduction<int>(int n,int numThreads,int numBlocks,int maxThreads,int 
maxBlocks,int *d_idata,int *d_odata); template void reduce<int>(int size, int threads, int blocks, int *d_idata, int *d_odata); template void reduce<scalar>(int size, int threads, int blocks, scalar *d_idata, scalar *d_odata); template bool gpu_copy_gpuarray<dVec>(GPUArray<dVec> &copyInto,GPUArray<dVec> &copyFrom,int maxBlockSize); template bool gpu_copy_gpuarray<scalar>(GPUArray<scalar> &copyInto,GPUArray<scalar> &copyFrom,int maxBlockSize); template bool gpu_set_array<int>(int *,int, int, int); template bool gpu_set_array<unsigned int>(unsigned int *,unsigned int, int, int); template bool gpu_set_array<int2>(int2 *,int2, int, int); template bool gpu_set_array<scalar>(scalar *,scalar, int, int); template bool gpu_set_array<dVec>(dVec *,dVec, int, int); template bool gpu_set_array<cubicLatticeDerivativeVector>(cubicLatticeDerivativeVector *,cubicLatticeDerivativeVector, int, int); /** @} */ //end of group declaration
1fd4839eaa3338afb0c611874bb0582e379b4e69.cu
#include "utilities.cuh" #include "functions.h" #define FULL_MASK 0xffffffff /*! \file utilities.cu defines kernel callers and kernels for some simple GPU array calculations \addtogroup utilityKernels @{ */ // Utility class used to avoid linker errors with extern // unsized shared memory arrays with templated type template<class T> struct SharedMemory { __device__ inline operator T *() { extern __shared__ int __smem[]; return (T *)__smem; } __device__ inline operator const T *() const { extern __shared__ int __smem[]; return (T *)__smem; } }; // specialize for double to avoid unaligned memory // access compile errors template<> struct SharedMemory<double> { __device__ inline operator double *() { extern __shared__ double __smem_d[]; return (double *)__smem_d; } __device__ inline operator const double *() const { extern __shared__ double __smem_d[]; return (double *)__smem_d; } }; /* This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ template <class T, unsigned int blockSize> __global__ void reduce6(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; T mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). 
More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (i + blockSize < n) mySum += g_idata[i+blockSize]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if ((blockSize >= 512) && (tid < 256)) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); if ((blockSize >= 256) &&(tid < 128)) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); if ((blockSize >= 128) && (tid < 64)) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); #if (__CUDA_ARCH__ >= 300 ) if ( tid < 32 ) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) mySum += sdata[tid + 32]; // Reduce final warp using shuffle for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down_sync(FULL_MASK,mySum, offset); } } #else // fully unroll reduction within a single warp if ((blockSize >= 64) && (tid < 32)) { sdata[tid] = mySum = mySum + sdata[tid + 32]; } __syncthreads(); if ((blockSize >= 32) && (tid < 16)) { sdata[tid] = mySum = mySum + sdata[tid + 16]; } __syncthreads(); if ((blockSize >= 16) && (tid < 8)) { sdata[tid] = mySum = mySum + sdata[tid + 8]; } __syncthreads(); if ((blockSize >= 8) && (tid < 4)) { sdata[tid] = mySum = mySum + sdata[tid + 4]; } __syncthreads(); if ((blockSize >= 4) && (tid < 2)) { sdata[tid] = mySum = mySum + sdata[tid + 2]; } __syncthreads(); if ((blockSize >= 2) && ( tid < 1)) { sdata[tid] = mySum = mySum + sdata[tid + 1]; } __syncthreads(); #endif // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = mySum; } template <class T> void reduce(int size, int threads, int blocks, T *d_idata, T *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T); switch (threads) { case 512: reduce6<T, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: reduce6<T, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: reduce6<T, 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: reduce6<T, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: reduce6<T, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: reduce6<T,16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: reduce6<T,8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: reduce6<T,4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: reduce6<T,2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: reduce6<T,1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; } } //////////////////////////////////////////////////////////////////////////////// // This function performs a reduction of the input data multiple times and // measures the average reduction time. 
//////////////////////////////////////////////////////////////////////////////// template <class T> T gpuReduction(int n, int numThreads, int numBlocks, int maxThreads, int maxBlocks, T *d_idata, T *d_odata) { T gpu_result = 0; bool needReadBack = true; int cpuFinalThreshold = 1; gpu_result = 0; // execute the kernel reduce<T>(n, numThreads, numBlocks, d_idata, d_odata); HANDLE_ERROR(cudaGetLastError()); //T *h_odata = (T *) malloc(numBlocks*sizeof(T)); // sum partial block sums on GPU int s=numBlocks; while (s > cpuFinalThreshold) { int threads = 0, blocks = 0; getNumBlocksAndThreads(s, maxBlocks, maxThreads, blocks, threads); reduce<T>(s, threads, blocks, d_odata, d_odata); s = (s + (threads*2-1)) / (threads*2); } /* if (s > 1) { // copy result from device to host cudaMemcpy(h_odata, d_odata, s * sizeof(T), cudaMemcpyDeviceToHost); for (int i=0; i < s; i++) { gpu_result += h_odata[i]; } needReadBack = false; } */ // copy final sum from device to host if (needReadBack) cudaMemcpy(&gpu_result, d_odata, sizeof(T), cudaMemcpyDeviceToHost); HANDLE_ERROR(cudaGetLastError()); //free(h_odata); return gpu_result; } /*! add the first N elements of array and put it in output[helperIdx] */ __global__ void gpu_serial_reduction_kernel(scalar *array, scalar *output, int helperIdx,int N) { scalar ans = 0.0; for (int i = 0; i < N; ++i) ans += array[i]; output[helperIdx] = ans; return; }; /*! add the first N elements of array and put it in output[helperIdx]...use shared memory a bit */ __global__ void gpu_serial_reduction_kernel2(scalar *array, scalar *output, int helperIdx,int N) { int tidx = threadIdx.x; extern __shared__ scalar partialSum[]; partialSum[tidx] = 0.0; __syncthreads(); int max = N/ blockDim.x+1; for (int i = 0; i < max;++i) { int pos = blockDim.x *i+tidx; if(pos > N) continue; partialSum[tidx] += array[pos]; } __syncthreads(); if(tidx ==0) { scalar ans =0.0; for (int i = 0; i < blockDim.x; ++i) ans += partialSum[i]; output[helperIdx] = ans; } return; }; /*! perform a block reduction, storing the partial sums of input into output */ __global__ void gpu_parallel_block_reduction_kernel(scalar *input, scalar *output,int N) { extern __shared__ scalar sharedArray[]; unsigned int tidx = threadIdx.x; unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; //load into shared memory and synchronize if(i < N) sharedArray[tidx] = input[i]; else sharedArray[tidx] = 0.0; __syncthreads(); //reduce for (int s = blockDim.x/2; s>0; s>>=1) { if (tidx < s) sharedArray[tidx] += sharedArray[tidx+s]; __syncthreads(); }; //write to the correct block of the output array if (tidx==0) output[blockIdx.x] = sharedArray[0]; }; /*! a slight optimization of the previous block reduction, c.f. M. Harris presentation */ __global__ void gpu_parallel_block_reduction2_kernel(scalar *input, scalar *output,int N) { extern __shared__ scalar sharedArray[]; unsigned int tidx = threadIdx.x; unsigned int i = 2*blockDim.x * blockIdx.x + threadIdx.x; scalar sum; //load into shared memory and synchronize if(i < N) sum = input[i]; else sum = 0.0; if(i + blockDim.x < N) sum += input[i+blockDim.x]; sharedArray[tidx] = sum; __syncthreads(); //reduce for (int s = blockDim.x/2; s>0; s>>=1) { if (tidx < s) sharedArray[tidx] = sum = sum+sharedArray[tidx+s]; __syncthreads(); }; //write to the correct block of the output array if (tidx==0) output[blockIdx.x] = sum; }; /*! multiple loads and loop unrolling... a slight optimization of the previous block reduction, c.f. M. 
Harris presentation */ __global__ void gpu_parallel_block_reduction3_kernel(scalar *input, scalar *output,int N) { extern __shared__ scalar sharedArray[]; unsigned int tidx = threadIdx.x; unsigned int i = 2*blockDim.x * blockIdx.x + threadIdx.x; if(i+blockDim.x < N) sharedArray[tidx] = input[i]+input[i+blockDim.x]; else if(i < N) sharedArray[tidx] = input[i]; else sharedArray[tidx] = 0.0; __syncthreads(); //reduce for (int stride = blockDim.x/2;stride >32; stride >>=1) { if(tidx<stride) sharedArray[tidx] += sharedArray[tidx+stride]; __syncthreads(); } if(tidx < 32) { sharedArray[tidx] += sharedArray[tidx+32]; sharedArray[tidx] += sharedArray[tidx+16]; sharedArray[tidx] += sharedArray[tidx+8]; sharedArray[tidx] += sharedArray[tidx+4]; sharedArray[tidx] += sharedArray[tidx+2]; sharedArray[tidx] += sharedArray[tidx+1]; } //write to the correct block of the output array if (tidx==0) output[blockIdx.x] = sharedArray[0]; }; /*! Store the dot product of two dVecs in a scalar vec */ __global__ void gpu_vec_dot_product_kernel(dVec *input1, dVec *input2, scalar *output,int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; output[idx] = dot(input1[idx],input2[idx]); return; }; /*! Store the dot product of two 5-component covectors in a scalar vector */ __global__ void gpu_vec_covectorDotProduct_kernel(dVec *input, scalar *output,int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; output[idx] = (4./3.)*input[idx][0]*input[idx][0] + input[idx][1]*input[idx][1] + input[idx][2]*input[idx][2] + (4./3.)*input[idx][3]*input[idx][3] + input[idx][4]*input[idx][4] - (4./3.)*input[idx][0]*input[idx][3]; return; } /*! Store the dot product of two 5-component vectors in a scalar vector */ __global__ void gpu_vec_vectorDotProduct_kernel(dVec *input1, dVec *input2, scalar *output,int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; output[idx] = input1[idx][0]*input2[idx][0] + input1[idx][1]*input2[idx][1] + input1[idx][2]*input2[idx][2] + input1[idx][3]*input2[idx][3] + input1[idx][4]*input2[idx][4] + input1[idx][0]*input2[idx][3]; return; } /*! Store the dot product of two 5-component vectors in a scalar vector */ __global__ void gpu_vec_vectorDotProduct_kernel(dVec *input, scalar *output,int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; output[idx] = input[idx][0]*input[idx][0] + input[idx][1]*input[idx][1] + input[idx][2]*input[idx][2] + input[idx][3]*input[idx][3] + input[idx][4]*input[idx][4] + input[idx][0]*input[idx][3]; return; } /*! Store the dot product of two dVecs in a scalar vec, unrolled by dimension */ __global__ void gpu_vec_dot_product_unrolled_kernel(dVec *input1, dVec *input2, scalar *output,int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; int p1 = idx / DIMENSION; int d1 = idx % DIMENSION; if (p1 >= N) return; output[idx] = input1[p1][d1]*input2[p1][d1]; return; }; /*! 
This kernel basically performs the operation of the "reduction2" kernel, but the shared memory gets dot products...BROKEN */ __global__ void gpu_dVec_dot_products_kernel(dVec *input1, dVec *input2, scalar *output,int N) { extern __shared__ scalar sharedArray[]; unsigned int tidx = threadIdx.x; unsigned int i = 2*blockDim.x * blockIdx.x + threadIdx.x; scalar tempSum; if(i < N) tempSum = dot(input1[i],input2[i]); else tempSum = 0.0; sharedArray[tidx] = 0.0; __syncthreads(); //reduce for (int s = blockDim.x/2;s>0; s>>=1) { if (tidx <s) sharedArray[tidx] = tempSum = tempSum+sharedArray[tidx+s]; __syncthreads(); }; //write to the correct block of the output array if(tidx==0) output[blockIdx.x] = tempSum; }; /*! This kernel basically performs the operation of the "reduction2" kernel, but the shared memory gets dot products */ __global__ void gpu_unrolled_dVec_dot_products_kernel(dVec *input1, dVec *input2, scalar *output,int N) { extern __shared__ scalar sharedArray[]; unsigned int tidx = threadIdx.x; unsigned int i = 2*blockDim.x * blockIdx.x + threadIdx.x; int p1 = i / DIMENSION; int d1 = i % DIMENSION; int p2 = (i+blockDim.x) / DIMENSION; int d2 = (i+blockDim.x) % DIMENSION; if(i+blockDim.x < N) sharedArray[tidx] = input1[p1][d1]*input2[p1][d1] + input1[p2][d2]*input2[p2][d2]; else if(i < N) sharedArray[tidx] = input1[p1][d1]*input2[p1][d1]; else sharedArray[tidx] = 0.0; __syncthreads(); //reduce for (int stride = blockDim.x/2;stride >32; stride >>=1) { if(tidx<stride) sharedArray[tidx] += sharedArray[tidx+stride]; __syncthreads(); } if(tidx < 32) { sharedArray[tidx] += sharedArray[tidx+32]; sharedArray[tidx] += sharedArray[tidx+16]; sharedArray[tidx] += sharedArray[tidx+8]; sharedArray[tidx] += sharedArray[tidx+4]; sharedArray[tidx] += sharedArray[tidx+2]; sharedArray[tidx] += sharedArray[tidx+1]; } //write to the correct block of the output array if (tidx==0) output[blockIdx.x] = sharedArray[0]; }; /*! take a vector of dVecs, a vector of scalars, a factor, and return a vector where every entry is factor*scalar[i]*(dVec[i])^2 */ __global__ void gpu_scalar_times_dVec_squared_kernel(dVec *d_vec1, scalar *d_scalars, scalar factor, scalar *d_ans, int n) { // read in the index that belongs to this thread unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= n) return; d_ans[idx] = factor * d_scalars[idx]*dot(d_vec1[idx],d_vec1[idx]); }; /*! take two vectors of dVecs and return a vector of scalars, where each entry is vec1[i].vec2[i] */ __global__ void gpu_dot_dVec_vectors_kernel(dVec *d_vec1, dVec *d_vec2, scalar *d_ans, int n) { // read in the index that belongs to this thread unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= n) return; d_ans[idx] = dot(d_vec1[idx],d_vec2[idx]); }; /*! multiply every element of an array of dVecs by the same scalar */ __global__ void gpu_dVec_times_scalar_kernel(dVec *d_vec1,scalar factor, int n) { // read in the index that belongs to this thread unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= n) return; d_vec1[idx] = factor*d_vec1[idx]; }; /*! 
multiply every element of an array of dVecs by the same scalar */ __global__ void gpu_dVec_times_scalar_kernel(dVec *d_vec1,scalar factor, dVec *d_ans,int n) { // read in the index that belongs to this thread unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= n) return; d_ans[idx] = factor*d_vec1[idx]; }; __global__ void gpu_dVec_plusEqual_dVec_kernel(dVec *d_vec1,dVec *d_vec2,scalar factor,int n) { // read in the index that belongs to this thread unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= n) return; int pIdx = idx / DIMENSION; int dIdx = idx % DIMENSION; d_vec1[pIdx][dIdx] += factor*d_vec2[pIdx][dIdx]; }; ///// //Kernel callers /// bool gpu_dVec_plusEqual_dVec(dVec *d_vec1, dVec *d_vec2, scalar factor, int N, int maxBlockSize) { unsigned int block_size = maxBlockSize; if (N < 128) block_size = 32; unsigned int nblocks = (DIMENSION*N)/block_size + 1; gpu_dVec_plusEqual_dVec_kernel<<<nblocks,block_size>>>(d_vec1,d_vec2,factor,DIMENSION*N); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; }; /*! \param d_vec1 dVec input array \param factor scalar multiplication factor \param N the length of the arrays \post d_vec1 *= factor for every element */ bool gpu_dVec_times_scalar(dVec *d_vec1, scalar factor, int N) { unsigned int block_size = 128; if (N < 128) block_size = 32; unsigned int nblocks = N/block_size + 1; gpu_dVec_times_scalar_kernel<<<nblocks,block_size>>>( d_vec1, factor, N); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; }; bool gpu_dVec_times_scalar(dVec *d_vec1, scalar factor, dVec *d_ans,int N) { unsigned int block_size = 128; if (N < 128) block_size = 32; unsigned int nblocks = N/block_size + 1; gpu_dVec_times_scalar_kernel<<<nblocks,block_size>>>( d_vec1, factor, d_ans, N); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; }; bool gpu_scalar_times_dVec_squared(dVec *d_vec1, scalar *d_scalars, scalar factor, scalar *d_ans, int N) { unsigned int block_size = 128; if (N < 128) block_size = 32; unsigned int nblocks = N/block_size + 1; gpu_scalar_times_dVec_squared_kernel<<<nblocks,block_size>>>( d_vec1, d_scalars, factor, d_ans, N); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; }; /*! \param d_vec1 dVec input array \param d_vec2 dVec input array \param d_ans scalar output array... 
d_ans[idx] = d_vec1[idx].d_vec2[idx] \param N the length of the arrays \post d_ans = d_vec1.d_vec2 */ bool gpu_dot_dVec_vectors(dVec *d_vec1, dVec *d_vec2, scalar *d_ans, int N) { unsigned int block_size = 128; if (N < 128) block_size = 32; unsigned int nblocks = N/block_size + 1; gpu_dot_dVec_vectors_kernel<<<nblocks,block_size>>>( d_vec1, d_vec2, d_ans, N); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; }; scalar gpu_gpuarray_QT_covector_dot_product( GPUArray<dVec> &input1, GPUArray<scalar> &intermediate, GPUArray<scalar> &intermediate2, int N, int block_size) { if (N == 0) N = input1.getNumElements(); unsigned int nblocks = N/block_size + 1; GPUArray<scalar> ans(1,false,false); if(intermediate.getNumElements() <N) intermediate.resize(N); if(intermediate2.getNumElements() <N) intermediate2.resize(N); scalar result = 0; //scope for array handles { ArrayHandle<dVec> i1(input1,access_location::device,access_mode::read); ArrayHandle<scalar> inter1(intermediate,access_location::device,access_mode::overwrite); gpu_vec_covectorDotProduct_kernel<<<nblocks,block_size>>>(i1.data,inter1.data,N); HANDLE_ERROR(cudaGetLastError()); int numBlocks = 0; int numThreads = 0; int maxBlocks = 64; int maxThreads = 256; ArrayHandle<scalar> inter2(intermediate2,access_location::device,access_mode::overwrite); getNumBlocksAndThreads(N, maxBlocks, maxThreads, numBlocks, numThreads); result = gpuReduction(N,numThreads,numBlocks,maxThreads,maxBlocks,inter1.data,inter2.data); return result; } }; scalar gpu_gpuarray_QT_vector_dot_product( GPUArray<dVec> &input1, GPUArray<dVec> &input2, GPUArray<scalar> &intermediate, GPUArray<scalar> &intermediate2, int N, int block_size) { if (N == 0) N = input1.getNumElements(); unsigned int nblocks = N/block_size + 1; GPUArray<scalar> ans(1,false,false); if(intermediate.getNumElements() <N) intermediate.resize(N); if(intermediate2.getNumElements() <N) intermediate2.resize(N); scalar result = 0; //scope for array handles { ArrayHandle<dVec> i1(input1,access_location::device,access_mode::read); ArrayHandle<dVec> i2(input2,access_location::device,access_mode::read); ArrayHandle<scalar> inter1(intermediate,access_location::device,access_mode::overwrite); gpu_vec_vectorDotProduct_kernel<<<nblocks,block_size>>>(i1.data,i2.data,inter1.data,N); HANDLE_ERROR(cudaGetLastError()); int numBlocks = 0; int numThreads = 0; int maxBlocks = 64; int maxThreads = 256; ArrayHandle<scalar> inter2(intermediate2,access_location::device,access_mode::overwrite); getNumBlocksAndThreads(N, maxBlocks, maxThreads, numBlocks, numThreads); result = gpuReduction(N,numThreads,numBlocks,maxThreads,maxBlocks,inter1.data,inter2.data); return result; } }; scalar gpu_gpuarray_QT_vector_dot_product( GPUArray<dVec> &input1, GPUArray<scalar> &intermediate, GPUArray<scalar> &intermediate2, int N, int block_size) { if (N == 0) N = input1.getNumElements(); unsigned int nblocks = N/block_size + 1; GPUArray<scalar> ans(1,false,false); if(intermediate.getNumElements() <N) intermediate.resize(N); if(intermediate2.getNumElements() <N) intermediate2.resize(N); scalar result = 0; //scope for array handles { ArrayHandle<dVec> i1(input1,access_location::device,access_mode::read); ArrayHandle<scalar> inter1(intermediate,access_location::device,access_mode::overwrite); gpu_vec_vectorDotProduct_kernel<<<nblocks,block_size>>>(i1.data,inter1.data,N); HANDLE_ERROR(cudaGetLastError()); int numBlocks = 0; int numThreads = 0; int maxBlocks = 64; int maxThreads = 256; ArrayHandle<scalar> 
inter2(intermediate2,access_location::device,access_mode::overwrite); getNumBlocksAndThreads(N, maxBlocks, maxThreads, numBlocks, numThreads); result = gpuReduction(N,numThreads,numBlocks,maxThreads,maxBlocks,inter1.data,inter2.data); return result; } }; scalar gpu_gpuarray_dVec_dot_products( GPUArray<dVec> &input1, GPUArray<dVec> &input2, GPUArray<scalar> &intermediate, GPUArray<scalar> &intermediate2, int N, int block_size) { if (N == 0) N = input1.getNumElements(); int Nd = DIMENSION*N; unsigned int nblocks = N/block_size + 1; GPUArray<scalar> ans(1,false,false); if(intermediate.getNumElements() <Nd) intermediate.resize(Nd); if(intermediate2.getNumElements() <Nd) intermediate2.resize(Nd); scalar result = 0; if(true) // for testing...switch to best reduction kernel { ArrayHandle<dVec> i1(input1,access_location::device,access_mode::read); ArrayHandle<dVec> i2(input2,access_location::device,access_mode::read); ArrayHandle<scalar> inter1(intermediate,access_location::device,access_mode::overwrite); ArrayHandle<scalar> inter2(intermediate2,access_location::device,access_mode::overwrite); gpu_vec_dot_product_unrolled_kernel<<<nblocks,block_size>>>(i1.data,i2.data,inter1.data,N); HANDLE_ERROR(cudaGetLastError()); int numBlocks = 0; int numThreads = 0; int maxBlocks = 64; int maxThreads = 256; getNumBlocksAndThreads(Nd, maxBlocks, maxThreads, numBlocks, numThreads); result = gpuReduction(Nd,numThreads,numBlocks,maxThreads,maxBlocks,inter1.data,inter2.data); return result; } else { { ArrayHandle<dVec> i1(input1,access_location::device,access_mode::read); ArrayHandle<dVec> i2(input2,access_location::device,access_mode::read); ArrayHandle<scalar> inter1(intermediate,access_location::device,access_mode::overwrite); ArrayHandle<scalar> inter2(intermediate2,access_location::device,access_mode::overwrite); ArrayHandle<scalar> answer(ans,access_location::device,access_mode::overwrite); gpu_dVec_dot_products(i1.data,i2.data,inter1.data,inter2.data,answer.data,0,N,block_size); } ArrayHandle<scalar> answer(ans,access_location::host,access_mode::read); return answer.data[0]; } } /*! takes the dot product of every element of the two input arrays and performs a reduction on the sum \param input1 vector 1...wow! \param input2 vector 2...wow! \param intermediate an array that input is dot producted to \param intermediate2 an array that input is block-reduced to \param output the intermediate array will be sum reduced and stored in one of the components of output \param helperIdx the location in output to store the answer \param N the size of the input and intermediate arrays \param block_size the...block size. 
doxygen is annoying sometimes */ bool gpu_dVec_dot_products(dVec *input1,dVec *input2, scalar *intermediate, scalar *intermediate2,scalar *output, int helperIdx, int N,int block_size) { //int problemSize = DIMENSION*N; //unsigned int nblocks = problemSize/block_size + 1; unsigned int nblocks = N/block_size + 1; //first dot the vectors together gpu_vec_dot_product_kernel<<<nblocks,block_size>>>(input1,input2,intermediate,N); HANDLE_ERROR(cudaGetLastError()); //then call the parallel reduction routine to sum up the answer gpu_parallel_reduction(intermediate,intermediate2,output,helperIdx,N,block_size); //gpu_serial_reduction(intermediate,output,helperIdx,N); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; /* HANDLE_ERROR(cudaGetLastError()); //first do a block reduction of input unsigned int smem = block_size*sizeof(scalar); //Do a block reduction of the input array //gpu_unrolled_dVec_dot_products_kernel<<<nblocks,block_size,smem>>>(input1,input2,intermediate, problemSize); gpu_dVec_dot_products_kernel<<<nblocks,block_size,smem>>>(input1,input2,intermediate, N); HANDLE_ERROR(cudaGetLastError()); //sum reduce the temporary array, saving the result in the right slot of the output array int nb=1024; if(nblocks < nb) nb = 1; gpu_serial_reduction_kernel2<<<1,nb,nb*sizeof(scalar)>>>(intermediate,output,helperIdx,nblocks+1); HANDLE_ERROR(cudaGetLastError()); */ } /* A stub of a function...eventually replace with off-the-shelf solution? */ bool gpu_dVec_dot_products(dVec *input1,dVec *input2, scalar *output, int helperIdx, int N) { //scalar init = 0.0; //dVecDotProduct mult_op; //thrust::plus<scalar> add_op; //thrust::device_ptr<scalar> ptrAns = thrust::device_pointer_cast(output); //thrust::device_ptr<dVec> ptr1 = thrust::device_pointer_cast(input1); //thrust::device_ptr<dVec> ptr2 = thrust::device_pointer_cast(input2); //output[helperIdx] = thrust::inner_product(thrust::device,ptr1,ptr1+N,ptr2,init,add_op,mult_op); //output[helperIdx] = thrust::inner_product(thrust::device,input1,input1+N,input2,init,add_op,mult_op); //ptrAns[helperIdx] = thrust::inner_product(thrust::device,input1,input1+N,input2,init,add_op,mult_op); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; }; /*! a two-step parallel reduction algorithm that first does a partial sum reduction of input into the intermediate array, then launches a second kernel to sum reduce intermediate into output[helperIdx] \param input the input array to sum \param intermediate an array that input is block-reduced to \param output the intermediate array will be sum reduced and stored in one of the components of output \param helperIdx the location in output to store the answer \param N the size of the input and intermediate arrays \param block_size the...block size. doxygen is annoying sometimes */ bool gpu_parallel_reduction(scalar *input, scalar *intermediate, scalar *output, int helperIdx, int N,int block_size) { unsigned int nblocks = N/block_size + 1; //first do a block reduction of input //Do a block reduction of the input array //reduce(N, block_size, nblocks,input, intermediate); unsigned int smem = block_size*sizeof(scalar); gpu_parallel_block_reduction2_kernel<<<nblocks,block_size,smem>>>(input,intermediate, N); HANDLE_ERROR(cudaGetLastError()); //sum reduce the temporary array, saving the result in the right slot of the output array int nb=2048; if(nblocks < nb) nb = 1; gpu_serial_reduction_kernel2<<<1,nb,nb*sizeof(scalar)>>>(intermediate,output,helperIdx,nblocks+1); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; }; /*! 
This serial reduction routine should probably never be called. It provides an interface to the gpu_serial_reduction_kernel above that may be useful for testing */ bool gpu_serial_reduction(scalar *array, scalar *output, int helperIdx, int N) { gpu_serial_reduction_kernel<<<1,1>>>(array,output,helperIdx,N); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; }; /*! A function of convenience... set an array on the device */ template <typename T> __global__ void gpu_set_array_kernel(T *arr,T value, int N) { // read in the particle that belongs to this thread unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; arr[idx] = value; return; }; template<typename T> bool gpu_set_array(T *array, T value, int N,int maxBlockSize) { unsigned int block_size = maxBlockSize; if (N < 128) block_size = 16; unsigned int nblocks = N/block_size + 1; gpu_set_array_kernel<<<nblocks, block_size>>>(array,value,N); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; } template <typename T> __global__ void gpu_copy_gpuarray_kernel(T *copyInto,T *copyFrom, int N) { // read in the particle that belongs to this thread unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; copyInto[idx] = copyFrom[idx]; return; }; template<typename T> bool gpu_copy_gpuarray(GPUArray<T> &copyInto,GPUArray<T> &copyFrom,int maxBlockSize) { int N = copyFrom.getNumElements(); if(copyInto.getNumElements() < N) copyInto.resize(N); unsigned int block_size = maxBlockSize; if (N < 128) block_size = 32; unsigned int nblocks = (N)/block_size + 1; ArrayHandle<T> ci(copyInto,access_location::device,access_mode::overwrite); ArrayHandle<T> cf(copyFrom,access_location::device,access_mode::read); gpu_copy_gpuarray_kernel<<<nblocks,block_size>>>(ci.data,cf.data,N); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; } scalar host_dVec_dot_products(dVec *input1,dVec *input2,int N) { scalar ans = 0.0; for (int ii = 0; ii < N; ++ii) for (int dd = 0; dd < DIMENSION; ++dd) ans +=input1[ii][dd]*input2[ii][dd]; return ans; } void host_dVec_plusEqual_dVec(dVec *d_vec1,dVec *d_vec2,scalar factor,int N) { for (int ii = 0; ii < N; ++ii) d_vec1[ii] = d_vec1[ii] + factor*d_vec2[ii]; } void host_dVec_times_scalar(dVec *d_vec1, scalar factor, dVec *d_ans, int N) { for(int ii = 0; ii < N; ++ii) d_ans[ii] = factor*d_vec1[ii]; } //explicit template instantiations template scalar gpuReduction<scalar>(int n,int numThreads,int numBlocks,int maxThreads,int maxBlocks,scalar *d_idata,scalar *d_odata); template int gpuReduction<int>(int n,int numThreads,int numBlocks,int maxThreads,int maxBlocks,int *d_idata,int *d_odata); template void reduce<int>(int size, int threads, int blocks, int *d_idata, int *d_odata); template void reduce<scalar>(int size, int threads, int blocks, scalar *d_idata, scalar *d_odata); template bool gpu_copy_gpuarray<dVec>(GPUArray<dVec> &copyInto,GPUArray<dVec> &copyFrom,int maxBlockSize); template bool gpu_copy_gpuarray<scalar>(GPUArray<scalar> &copyInto,GPUArray<scalar> &copyFrom,int maxBlockSize); template bool gpu_set_array<int>(int *,int, int, int); template bool gpu_set_array<unsigned int>(unsigned int *,unsigned int, int, int); template bool gpu_set_array<int2>(int2 *,int2, int, int); template bool gpu_set_array<scalar>(scalar *,scalar, int, int); template bool gpu_set_array<dVec>(dVec *,dVec, int, int); template bool gpu_set_array<cubicLatticeDerivativeVector>(cubicLatticeDerivativeVector *,cubicLatticeDerivativeVector, int, int); /** @} */ //end of group declaration
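The doxygen comments above describe gpu_parallel_reduction as a two-step sum: a block-level partial reduction into an intermediate array, followed by a second kernel that finishes the sum. The sketch below restates that pattern in plain CUDA with hypothetical names (blockReduceSum, finalReduceSum, reduceTwoStep); it is an illustration of the idea only, not the library's implementation.

// Minimal two-step sum reduction sketch. Assumes blockSize is a power of two
// and that d_partial has room for at least one scalar per block.
#include <cuda_runtime.h>

__global__ void blockReduceSum(const float *in, float *partial, int n)
{
    extern __shared__ float smem[];
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    smem[tid] = (idx < n) ? in[idx] : 0.0f;
    __syncthreads();

    // tree reduction within the block (blockDim.x must be a power of two)
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s)
            smem[tid] += smem[tid + s];
        __syncthreads();
    }
    if (tid == 0)
        partial[blockIdx.x] = smem[0];
}

__global__ void finalReduceSum(const float *partial, float *out, int nPartial)
{
    // a single thread finishes the job; fine when the block count is modest
    float sum = 0.0f;
    for (int i = 0; i < nPartial; ++i)
        sum += partial[i];
    *out = sum;
}

void reduceTwoStep(const float *d_in, float *d_partial, float *d_out, int n, int blockSize)
{
    int nBlocks = (n + blockSize - 1) / blockSize;
    blockReduceSum<<<nBlocks, blockSize, blockSize * sizeof(float)>>>(d_in, d_partial, n);
    finalReduceSum<<<1, 1>>>(d_partial, d_out, nBlocks);
}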
vector_addition.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> int are_vectors_equal(int* a, int* b, int n); /* The old-fashioned CPU-only way to add two vectors */ void add_vectors_host(int *result, int *a, int *b, int n) { for (int i=0; i<n; i++) result[i] = a[i] + b[i]; } /* The kernel that will execute on the GPU */ __global__ void add_vectors_kernel(int *result, int *a, int *b, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; // If we have more threads than the magnitude of our vector, we need to // make sure that the excess threads don't try to save results into // unallocated memory. if (idx < n) result[idx] = a[idx] + b[idx]; } /* This function encapsulates the process of creating and tearing down the * environment used to execute our vector addition kernel. The steps of the * process are: * 1. Allocate memory on the device to hold our vectors * 2. Copy the vectors to device memory * 3. Execute the kernel * 4. Retrieve the result vector from the device by copying it to the host * 5. Free memory on the device */ void add_vectors_dev(int *result, int *a, int *b, int n) { // Step 1: Allocate memory int *a_dev, *b_dev, *result_dev; // Since hipMalloc does not return a pointer like C's traditional malloc // (it returns a success status instead), we provide as it's first argument // the address of our device pointer variable so that it can change the // value of our pointer to the correct device address. hipMalloc((void **) &a_dev, sizeof(int) * n); hipMalloc((void **) &b_dev, sizeof(int) * n); hipMalloc((void **) &result_dev, sizeof(int) * n); // Step 2: Copy the input vectors to the device hipError_t err = hipMemcpy(a_dev, a, sizeof(int) * n, hipMemcpyHostToDevice); if (err != hipSuccess) printf("ERROR!!!!!!!!!!!!!!!!!!!!!!!!!!!"); hipMemcpy(b_dev, b, sizeof(int) * n, hipMemcpyHostToDevice); // Step 3: Invoke the kernel // We allocate enough blocks (each 512 threads long) in the grid to // accomodate all `n` elements in the vectors. The 512 long block size // is somewhat arbitrary, but with the constraint that we know the // hardware will support blocks of that size. dim3 dimGrid((n + 512 - 1) / 512, 1, 1); dim3 dimBlock(512, 1, 1); hipLaunchKernelGGL(( add_vectors_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, result_dev, a_dev, b_dev, n); // Step 4: Retrieve the results hipMemcpy(result, result_dev, sizeof(int) * n, hipMemcpyDeviceToHost); // Step 5: Free device memory hipFree(a_dev); hipFree(b_dev); hipFree(result_dev); } void print_vector(int *array, int n) { int i; for (i=0; i<n; i++) printf("%d ", array[i]); printf("\n"); } int main(void) { int n = 5; // Length of the arrays int a[] = {0, 1, 2, 3, 4}; int b[] = {5, 6, 7, 8, 9}; int host_result[5]; int device_result[5]; int l, i; int* rand_a, *rand_b, *rand_host_result, *rand_device_result; clock_t start, stop; double gpu_time, cpu_time; printf("Please enter vector length: "); scanf("%d", &l); rand_a = (int*) malloc(sizeof(int)*l); rand_b = (int*) malloc(sizeof(int)*l); rand_host_result = (int*) malloc(sizeof(int)*l); rand_device_result = (int*) malloc(sizeof(int)*l); printf("The CPU's answer: "); add_vectors_host(host_result, a, b, n); print_vector(host_result, n); printf("The GPU's answer: "); add_vectors_dev(device_result, a, b, n); print_vector(device_result, n); printf("Generating vectors of length %d... 
\n", l); for(i=0; i<l; ++i) { rand_a[i] = rand() % 10; rand_b[i] = rand() % 10; //printf("%d: %d, %d \n", i, rand_a[i], rand_b[i]); } start = clock(); add_vectors_host(rand_host_result, rand_a, rand_b, l); stop = clock(); cpu_time = (double) (stop-start)/CLOCKS_PER_SEC; start = clock(); add_vectors_dev(rand_device_result, rand_a, rand_b, l); stop = clock(); gpu_time = (double) (stop-start)/CLOCKS_PER_SEC; //print_vector(rand_host_result, l); printf("CPU compute time: %f", cpu_time); printf("\n"); printf("GPU compute time: %f", gpu_time); printf("\n"); printf("Ratio: %f", cpu_time / gpu_time); printf("\n"); if(!are_vectors_equal(rand_host_result, rand_device_result, l)) { printf("WARNING! Host and device results do not agree"); } free(rand_a); free(rand_b); return 0; } int are_vectors_equal(int* a, int* b, int n) { // Return 1 if vectors a and be are equal, else return 0. int i; for (i=0; i<n; ++i) { if (a[i] != b[i]) return 0; } return 1; }
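The grid-sizing comment in add_vectors_dev relies on the ceiling division (n + 512 - 1) / 512 so that every element is covered by a thread. A tiny host-side check of that arithmetic, with illustrative names only:

// Quick sanity check of the ceiling division used for the grid size above.
#include <assert.h>

static int blocks_for(int n) { return (n + 512 - 1) / 512; }

static void check_grid_math(void)
{
    assert(blocks_for(1)    == 1);   // one element still needs one block
    assert(blocks_for(512)  == 1);   // exactly one full block
    assert(blocks_for(513)  == 2);   // one extra element spills into a second block
    assert(blocks_for(1000) == 2);   // 1000 / 512 rounds up to 2
}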
vector_addition.cu
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int are_vectors_equal(int* a, int* b, int n);

/* The old-fashioned CPU-only way to add two vectors */
void add_vectors_host(int *result, int *a, int *b, int n) {
    for (int i=0; i<n; i++)
        result[i] = a[i] + b[i];
}

/* The kernel that will execute on the GPU */
__global__ void add_vectors_kernel(int *result, int *a, int *b, int n) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    // If we have more threads than the magnitude of our vector, we need to
    // make sure that the excess threads don't try to save results into
    // unallocated memory.
    if (idx < n)
        result[idx] = a[idx] + b[idx];
}

/* This function encapsulates the process of creating and tearing down the
 * environment used to execute our vector addition kernel. The steps of the
 * process are:
 *   1. Allocate memory on the device to hold our vectors
 *   2. Copy the vectors to device memory
 *   3. Execute the kernel
 *   4. Retrieve the result vector from the device by copying it to the host
 *   5. Free memory on the device
 */
void add_vectors_dev(int *result, int *a, int *b, int n) {
    // Step 1: Allocate memory
    int *a_dev, *b_dev, *result_dev;

    // Since cudaMalloc does not return a pointer like C's traditional malloc
    // (it returns a success status instead), we provide as its first argument
    // the address of our device pointer variable so that it can change the
    // value of our pointer to the correct device address.
    cudaMalloc((void **) &a_dev, sizeof(int) * n);
    cudaMalloc((void **) &b_dev, sizeof(int) * n);
    cudaMalloc((void **) &result_dev, sizeof(int) * n);

    // Step 2: Copy the input vectors to the device
    cudaError_t err = cudaMemcpy(a_dev, a, sizeof(int) * n, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
        printf("ERROR: cudaMemcpy failed: %s\n", cudaGetErrorString(err));
    cudaMemcpy(b_dev, b, sizeof(int) * n, cudaMemcpyHostToDevice);

    // Step 3: Invoke the kernel
    // We allocate enough blocks (each 512 threads long) in the grid to
    // accommodate all `n` elements in the vectors. The 512 long block size
    // is somewhat arbitrary, but with the constraint that we know the
    // hardware will support blocks of that size.
    dim3 dimGrid((n + 512 - 1) / 512, 1, 1);
    dim3 dimBlock(512, 1, 1);
    add_vectors_kernel<<<dimGrid, dimBlock>>>(result_dev, a_dev, b_dev, n);

    // Step 4: Retrieve the results
    cudaMemcpy(result, result_dev, sizeof(int) * n, cudaMemcpyDeviceToHost);

    // Step 5: Free device memory
    cudaFree(a_dev);
    cudaFree(b_dev);
    cudaFree(result_dev);
}

void print_vector(int *array, int n) {
    int i;
    for (i=0; i<n; i++)
        printf("%d ", array[i]);
    printf("\n");
}

int main(void) {
    int n = 5; // Length of the arrays
    int a[] = {0, 1, 2, 3, 4};
    int b[] = {5, 6, 7, 8, 9};
    int host_result[5];
    int device_result[5];
    int l, i;
    int* rand_a, *rand_b, *rand_host_result, *rand_device_result;
    clock_t start, stop;
    double gpu_time, cpu_time;

    printf("Please enter vector length: ");
    scanf("%d", &l);
    rand_a = (int*) malloc(sizeof(int)*l);
    rand_b = (int*) malloc(sizeof(int)*l);
    rand_host_result = (int*) malloc(sizeof(int)*l);
    rand_device_result = (int*) malloc(sizeof(int)*l);

    printf("The CPU's answer: ");
    add_vectors_host(host_result, a, b, n);
    print_vector(host_result, n);

    printf("The GPU's answer: ");
    add_vectors_dev(device_result, a, b, n);
    print_vector(device_result, n);

    printf("Generating vectors of length %d... \n", l);
    for(i=0; i<l; ++i) {
        rand_a[i] = rand() % 10;
        rand_b[i] = rand() % 10;
        //printf("%d: %d, %d \n", i, rand_a[i], rand_b[i]);
    }

    start = clock();
    add_vectors_host(rand_host_result, rand_a, rand_b, l);
    stop = clock();
    cpu_time = (double) (stop-start)/CLOCKS_PER_SEC;

    start = clock();
    add_vectors_dev(rand_device_result, rand_a, rand_b, l);
    stop = clock();
    gpu_time = (double) (stop-start)/CLOCKS_PER_SEC;

    //print_vector(rand_host_result, l);
    printf("CPU compute time: %f", cpu_time);
    printf("\n");
    printf("GPU compute time: %f", gpu_time);
    printf("\n");
    printf("Ratio: %f", cpu_time / gpu_time);
    printf("\n");

    if(!are_vectors_equal(rand_host_result, rand_device_result, l)) {
        printf("WARNING! Host and device results do not agree\n");
    }

    free(rand_a);
    free(rand_b);
    free(rand_host_result);
    free(rand_device_result);
    return 0;
}

int are_vectors_equal(int* a, int* b, int n) {
    // Return 1 if vectors a and b are equal, else return 0.
    int i;
    for (i=0; i<n; ++i) {
        if (a[i] != b[i])
            return 0;
    }
    return 1;
}
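The file above reports a failed cudaMemcpy without further detail and times the GPU path with clock(), which also counts allocation and transfers inside add_vectors_dev. As a hedged sketch (the macro and helper names are illustrative, and it reuses add_vectors_kernel from the file above), one way to report errors and time only the kernel with CUDA events:

// Sketch: an error-reporting macro plus event-based timing of just the kernel.
// CUDA_CHECK and time_kernel_ms are illustrative names, not part of the file above.
#include <stdio.h>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                   \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess)                                           \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                    \
                    cudaGetErrorString(err_), __FILE__, __LINE__);         \
    } while (0)

// Times only the kernel launch, unlike the clock() measurement in main(),
// which wraps the whole add_vectors_dev call including cudaMalloc/cudaMemcpy.
float time_kernel_ms(int *result_dev, int *a_dev, int *b_dev, int n)
{
    cudaEvent_t start, stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));

    dim3 dimGrid((n + 512 - 1) / 512, 1, 1);
    dim3 dimBlock(512, 1, 1);

    CUDA_CHECK(cudaEventRecord(start));
    add_vectors_kernel<<<dimGrid, dimBlock>>>(result_dev, a_dev, b_dev, n);
    CUDA_CHECK(cudaEventRecord(stop));
    CUDA_CHECK(cudaEventSynchronize(stop));

    float ms = 0.0f;
    CUDA_CHECK(cudaEventElapsedTime(&ms, start, stop));
    CUDA_CHECK(cudaEventDestroy(start));
    CUDA_CHECK(cudaEventDestroy(stop));
    return ms;
}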
a7c53ef1359f534001c7a94dc91db70d96ecf5b1.hip
// !!! This is a file automatically generated by hipify!!! #include <f/device/device_assert/cuda_assert.hpp> #include <f/device/device_assert/cublas_assert.hpp> #include <f/device/device_assert/kernel_assert.hpp> #include <hip/hip_runtime.h> #include <rocblas.h> #include <hip/hip_complex.h> #include <math_functions.h> #if 1 //should call with Dznrm2<<<1,128>>>(...) __global__ void Dznrm2( unsigned long m, double2 *dA, double *dxnorm ) { unsigned long i = threadIdx.x; __shared__ double x[128]; double lsum = 0.0; for( unsigned long j = i; j < m; j += 128 ) { double const re = dA[j].x; double const im = dA[j].y; lsum += re*re + im*im; } x[i] = lsum; __syncthreads(); if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads(); if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads(); if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads(); if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads(); if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads(); if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads(); if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads(); if ( i == 0 ) *dxnorm = sqrt(x[0]); } __global__ void Dasum( unsigned long m, double2 *dA, double *dxnorm ) { unsigned long i = threadIdx.x; __shared__ double x[128]; double lsum = 0.0; for( unsigned long j = i; j < m; j += 128 ) { double const re = dA[j].x; double const im = dA[j].y; lsum += sqrt(re*re + im*im); } x[i] = lsum; __syncthreads(); if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads(); if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads(); if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads(); if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads(); if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads(); if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads(); if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads(); if ( i == 0 ) *dxnorm = x[0]; } #endif #if 0 __global__ void Dznrm2( unsigned long int n, double2* x, double* the_norm ) { __shared__ double sSum[512]; double res = 0.0; double2* lastX = x + n; x += threadIdx.x + blockIdx.x*512; unsigned long const blockOffset = gridDim.x*512; while ( x < lastX ) { double R = (*x).x; double I = (*x).y; res += R * R + I * I; x += blockOffset; } if (threadIdx.x >= 32) sSum[threadIdx.x] = res; __syncthreads(); if (threadIdx.x < 32) for ( unsigned long i=1; i < 16; ++i ) res += sSum[i*32 + threadIdx.x]; __syncthreads(); if (threadIdx.x < 32) { double* vsSum = sSum; vsSum[threadIdx.x] = res; if (threadIdx.x < 16) vsSum[threadIdx.x] += vsSum[threadIdx.x + 16]; __syncthreads(); if (threadIdx.x < 8) vsSum[threadIdx.x] += vsSum[threadIdx.x + 8]; __syncthreads(); if (threadIdx.x < 4) vsSum[threadIdx.x] += vsSum[threadIdx.x + 4]; __syncthreads(); if (threadIdx.x < 2) vsSum[threadIdx.x] += vsSum[threadIdx.x + 2]; __syncthreads(); if (threadIdx.x == 0) *the_norm = sqrt( vsSum[0] + vsSum[1] ); } } #endif //should call with Zscale<<<1, 128>>>(...); __global__ void Zscal( unsigned long m, double real, double2* dA ) { const int i = threadIdx.x; for( unsigned long j = i; j < m; j += 128 ) { dA[j].x *= real; dA[j].y *= real; } } __global__ //<<<((dim+15)/16,(dim+15)/16), (16,16)>>> void Zgemm( double2* P, double2* M, double2* N, unsigned long dim, double alpha ) { typedef double value_type; typedef double2 complex_type; typedef unsigned long size_type; __shared__ value_type _M[16][17]; __shared__ value_type _m[16][17]; __shared__ value_type _N[16][17]; __shared__ value_type _n[16][17]; const size_type bx = blockIdx.x; const size_type by = blockIdx.y; const size_type tx = threadIdx.x; const size_type ty = threadIdx.y; const size_type row = by * 16 + ty; const 
size_type col = bx * 16 + tx; const size_type iter_n = (dim+15)/16; value_type R = 0.0; value_type I = 0.0; for ( size_type i = 0; i != iter_n; ++i ) { if ( i * 16 + tx < dim && row < dim ) { _M[ty][tx] = (*( M + row * dim + i * 16 + tx )).x; _m[ty][tx] = (*( M + row * dim + i * 16 + tx )).y; } else { _M[ty][tx] = 0.0; _m[ty][tx] = 0.0; } if ( i * 16 + ty < dim && col < dim ) { _N[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).x; _n[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).y; } else { _N[ty][tx] = 0.0; _n[ty][tx] = 0.0; } __syncthreads(); #pragma unroll for ( size_type j = 0; j != 16; ++j ) { R += _M[ty][j] * _N[j][tx] - _m[ty][j] * _n[j][tx]; I += _M[ty][j] * _n[j][tx] + _m[ty][j] * _N[j][tx]; } __syncthreads(); } if ( row < dim && col < dim ) { (*( P + row * dim + col )).x = alpha * R; (*( P + row * dim + col )).y = alpha * I; } } __global__ void //<<<1,128>>> Zcopy( unsigned long dims, double2* src, double2* dst ) { unsigned long const i = threadIdx.x; for( unsigned long j = i; j < dims; j += 128 ) { (*(dst+j)).x = (*(src+j)).x; (*(dst+j)).y = (*(src+j)).y; } } __global__ void//<<<1, 128>>> Zaxpy( unsigned long dims, double real, double imag, double2* dst, double2* src ) // dst += (real,imag) * src { unsigned long const i = threadIdx.x; double R = 0.0; double I = 0.0; for( unsigned long j = i; j < dims; j += 128 ) { R = (*(src+j)).x; I = (*(src+j)).y; (*(dst+j)).x += real * R - imag * I; (*(dst+j)).y += real * I + imag * R; } } __global__ void compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim ) { int const row_index = threadIdx.x; for ( unsigned long col_index = 0; col_index != dim; ++col_index ) { unsigned long a_offset = row_index * dim + col_index; unsigned long const ug_index = *(ar+a_offset); //*(a+a_offset) = make_cuDoubleComplex( *(ug+ug_index+ug_index), *(ug+ug_index+ug_index+1) ); *(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)) / 100.0, thickness *( *(ug+ug_index+ug_index)) / 100.0 ); } //*(a+row_index*dim+row_index) = make_cuDoubleComplex( *(diag+row_index), 0.0 ); *(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness *( *(diag+row_index) ) / 100.0 ); } __global__ void extract_intensity_diff( double2* s, double* I_exp, double* I_diff, unsigned long dim, unsigned long column_index ) { int const I_offset = threadIdx.x; int const S_offset = column_index + threadIdx.x * dim; double const norm = cuCabs(*(s+S_offset)); *(I_diff+I_offset) = *(I_exp+I_offset) - norm * norm; } __global__ void extract_intensity_diff_with_offset( double2* s, double* I_exp, double* I_diff, unsigned long dim, unsigned long column_index, double ac_offset, double dc_offset ) { int const I_offset = threadIdx.x; int const S_offset = column_index + threadIdx.x * dim; double const norm = cuCabs(*(s+S_offset)); *(I_diff+I_offset) = *(I_exp+I_offset) - norm * norm * ac_offset - dc_offset; } __global__ void sum_diag( double2* a, unsigned long dim, double real, double imag ) { int const index = threadIdx.x; int const offset = index * dim + index; *(a+offset) = make_cuDoubleComplex( cuCreal(*(a+offset))+real, cuCimag(*(a+offset))+imag ); } /* * Input/Output: * ** ug[M] * ar[n][n] * diag[n] ==>> I_diff[n] ** thickness * dim -- n * I_exp[n] ** column_index * * cache: * a_[n][n] -- p2p3 * a^2_[n][n] -- s * a^3_[n][n] -- s_ * P1[n][n] * P2[n][n] * P3[n][n] * * 1) compose A * 2) scale to A_ * 3) compute A_^2 A_^3 * 4) compute (P1) (P2) (P3) * 5) square back * 6) extract one column */ __global__ void 
make_individual_pattern_intensity_diff( double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long max_dim, unsigned long tilt_size ) { unsigned long const tilt_index = blockDim.x * blockIdx.x + threadIdx.x; if ( tilt_index >= tilt_size ) return; unsigned long const dim = *(cuda_dim + tilt_index); double* ug = cuda_ug; unsigned long* ar = cuda_ar + tilt_index * max_dim * max_dim; double* diag = cuda_diag + tilt_index * max_dim; double* I_exp = cuda_I_exp + tilt_index * max_dim; double* I_diff = cuda_I_diff + tilt_index * max_dim; double2* cache = cuda_cache + 6 * tilt_index * max_dim * max_dim; unsigned long dimdim = dim*dim; //cache should be of size 6*N^2 double2* a_ = cache; double2* aa_ = a_ + dimdim; double2* aaa_ = aa_ + dimdim; double2* p1 = aaa_ + dimdim; double2* p2 = p1 + dimdim; double2* p3 = p2 + dimdim; //reuse memory in latter steps, when a_, aa_ and aaa_ are idle //double2* p2p3 = a_; double2* p2p3 = aaa_; double2* s = aa_; double2* s_ = aaa_; //1) kernel_assert(hipLaunchKernelGGL(( (compose_a), dim3(1), dim3(dim), 0, 0, ug, ar, diag, thickness, a_, dim )) ); cuda_assert( hipDeviceSynchronize() ); //2) //TODO double* the_norm = (double*)aa_; kernel_assert(hipLaunchKernelGGL(( (Dznrm2), dim3(1),dim3(128), 0, 0, dimdim, a_, the_norm )) ); //kernel_assert( (Dasum<<<1,128>>>( dimdim, a_, the_norm )) ); cuda_assert( hipDeviceSynchronize() ); //double const ratio = (*the_norm) * 53.71920351148152; double const ratio = (*the_norm) / 5.371920351148152; unsigned long const scaler = ratio < 1.0 ? 0 : ceil(log2(ratio)); unsigned long const scaling_factor = 1 << scaler; double const scale = scaling_factor; kernel_assert(hipLaunchKernelGGL(( (Zscal), dim3(1), dim3(128), 0, 0, dimdim, 1.0/scale, a_ )) ); //a_ /= scale cuda_assert( hipDeviceSynchronize() ); //3) dim3 const mm_grids( (dim+15)/16, (dim+15)/16 ); dim3 const mm_threads( 16, 16 ); kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, aa_, a_, a_, dim, 1.0 )) ); cuda_assert( hipDeviceSynchronize() ); kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, aaa_, aa_, a_, dim, 1.0 )) ); cuda_assert( hipDeviceSynchronize() ); //4) /* * Maple: * Digits := 25 * evalf(solve(_Z^9+9*_Z^8+72*_Z^7+504*_Z^6+3024*_Z^5+15120*_Z^4+60480*_Z^3+181440*_Z^2+362880*_Z+362880 = 0)) * Returns: * 2.697333461536989227389605+5.184162062649414177834087*I, //c1 * -.3810698456631129990312942+4.384644533145397950369203*I, //c2 * -2.110839800302654737498705+3.089910928725500922777702*I, //c3 * -3.038648072936697089212469+1.586801195758838328803868*I, //c4 * -3.333551485269048803294274, //c5 * -3.038648072936697089212469-1.586801195758838328803868*I, //c6 * -2.110839800302654737498705-3.089910928725500922777702*I, //c7 * -.3810698456631129990312942-4.384644533145397950369203*I, //c8 * 2.697333461536989227389605-5.184162062649414177834087*I //c9 * * expand((x-c1)*(x-c2)*(x-c3)) >> p1 ( p1_c ) * x^3-.205423815571221490859606*x^2-(12.65871752452031305098099*I)*x^2-58.21460179641193947200471*x-(3.189848964212376356715960*I)*x-19.71085376106750328141397+94.20645646169128946503649*I * * expand((x-c4)*(x-c5)*(x-c6)) >> p2 ( p2_c ) * x^3+9.410847631142442981719212*x^2+39.17363072664900708597702-6.123261017392618755198919*10^(-24)*I+32.01029973951970099352671*x+(4.*10^(-24)*I)*x * * expand((x-c7)*(x-c8)*(x-c9)) >> p3 ( p3_c ) * 
x^3-.205423815571221490859601*x^2+(12.65871752452031305098099*I)*x^2-58.21460179641193947200470*x+(3.18984896421237635671600*I)*x-19.71085376106750328141404-94.20645646169128946503646*I * * expand((x-c1)*(x-c2)*(x-c3)*(x-c4)*(x-c5)*(x-c6)*(x-c7)*(x-c8)*(x-c9)) * 3.628800000000000000000003*10^5-1.365022562699469279472268*10^(-19)*I+3.628800000000000000000003*10^5*x+x^9+9.00000000000000000000000*x^8+72.00000000000000000000006*x^7+503.9999999999999999999995*x^6+3024.000000000000000000002*x^5+15120.00000000000000000000*x^4+60479.99999999999999999995*x^3+1.814400000000000000000001*10^5*x^2-(5.*10^(-22)*I)*x^6-(1.*10^(-20)*I)*x^4-(1.0*10^(-19)*I)*x^3+(2.*10^(-24)*I)*x^8-(3.0*10^(-19)*I)*x^2-(7.*10^(-21)*I)*x^5-(4.*10^(-19)*I)*x+(2.*10^(-23)*I)*x^7 */ //4 - p1) kernel_assert(hipLaunchKernelGGL(( (Zcopy), dim3(1),dim3(128), 0, 0, dimdim, aaa_, p1 )) ); cuda_assert( hipDeviceSynchronize() ); kernel_assert(hipLaunchKernelGGL(( (Zaxpy), dim3(1),dim3(128), 0, 0, dimdim, -0.205423815571221490859606, -12.65871752452031305098099, p1, aa_ )) ); cuda_assert( hipDeviceSynchronize() ); kernel_assert(hipLaunchKernelGGL(( (Zaxpy), dim3(1),dim3(128), 0, 0, dimdim, -58.21460179641193947200471, -3.189848964212376356715960, p1, a_ )) ); cuda_assert( hipDeviceSynchronize() ); kernel_assert(hipLaunchKernelGGL(( (sum_diag), dim3(1),dim3(dim), 0, 0, p1, dim, -19.71085376106750328141397, 94.20645646169128946503649 )) ); cuda_assert( hipDeviceSynchronize() ); //4 - p2) kernel_assert(hipLaunchKernelGGL(( (Zcopy), dim3(1),dim3(128), 0, 0, dimdim, aaa_, p2 )) ); cuda_assert( hipDeviceSynchronize() ); kernel_assert(hipLaunchKernelGGL(( (Zaxpy), dim3(1),dim3(128), 0, 0, dimdim, 9.410847631142442981719212, 0.0, p2, aa_ )) ); cuda_assert( hipDeviceSynchronize() ); kernel_assert(hipLaunchKernelGGL(( (Zaxpy), dim3(1),dim3(128), 0, 0, dimdim, 32.01029973951970099352671, 0.0, p2, a_ )) ); cuda_assert( hipDeviceSynchronize() ); kernel_assert(hipLaunchKernelGGL(( (sum_diag), dim3(1),dim3(dim), 0, 0, p2, dim, 39.17363072664900708597702, 0.0 )) ); cuda_assert( hipDeviceSynchronize() ); //4 - p3) kernel_assert(hipLaunchKernelGGL(( (Zcopy), dim3(1),dim3(128), 0, 0, dimdim, aaa_, p3 )) ); cuda_assert( hipDeviceSynchronize() ); kernel_assert(hipLaunchKernelGGL(( (Zaxpy), dim3(1),dim3(128), 0, 0, dimdim, -0.205423815571221490859601, 12.65871752452031305098099, p3, aa_ )) ); cuda_assert( hipDeviceSynchronize() ); kernel_assert(hipLaunchKernelGGL(( (Zaxpy), dim3(1),dim3(128), 0, 0, dimdim, -58.21460179641193947200470, 3.18984896421237635671600, p3, a_ )) ); cuda_assert( hipDeviceSynchronize() ); kernel_assert(hipLaunchKernelGGL(( (sum_diag), dim3(1),dim3(dim), 0, 0, p3, dim, -19.71085376106750328141404, -94.20645646169128946503646 )) ); cuda_assert( hipDeviceSynchronize() ); //4 - s) // s = 1/602.39521910453439454428( p1 * ( 1/602.39521910453439454428 * p2 * p3 ) ) = (p1 p2 p3)/362880 kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, p2p3, p2, p3, dim, 0.0016600397351866578333 )) ); cuda_assert( hipDeviceSynchronize() ); kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, s, p1, p2p3, dim, 0.0016600397351866578333 )) ); cuda_assert( hipDeviceSynchronize() ); //5) if ( scaler != 0 ) { for ( unsigned long index = 0; index != scaler; ++index ) { kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, s_, s, s, dim, 1.0 )) ); cuda_assert( hipDeviceSynchronize() ); double2* tmp = s_; s_ = s; s = tmp; } } //6) //kernel_assert( 
(extract_intensity_diff<<<1,dim>>>( s, I_exp, I_diff, dim, column_index )) ); double const ac_offset = cuda_ug[0]; double const dc_offset = cuda_ug[1]; kernel_assert(hipLaunchKernelGGL(( (extract_intensity_diff_with_offset), dim3(1),dim3(dim), 0, 0, s, I_exp, I_diff, dim, column_index, ac_offset, dc_offset )) ); cuda_assert( hipDeviceSynchronize() ); } void make_pattern_intensity_diff( double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long tilt_size, unsigned long max_dim ) { unsigned long const threads = 64; unsigned long const grids = (tilt_size + threads - 1)/threads; kernel_assert( (hipLaunchKernelGGL(( make_individual_pattern_intensity_diff), dim3(grids), dim3(threads), 0, 0, cuda_ug, cuda_ar, cuda_diag, thickness, cuda_dim, cuda_I_exp, cuda_I_diff, column_index, cuda_cache, max_dim, tilt_size ) ) ); cuda_assert( hipDeviceSynchronize() ); }
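The Maple comment above lists the roots of z^9 + 9z^8 + 72z^7 + ... + 362880, whose coefficients are 9!/k!, i.e. 362880 times the degree-9 Taylor polynomial of exp(z); the kernel scales A down by 2^scaler, evaluates that polynomial as the factored product p1*p2*p3 divided by 362880, and then squares the result scaler times. A scalar host-side analogue of this scaling-and-squaring idea is sketched below; the names are illustrative and the polynomial is summed term by term rather than via the factored cubics used on the device.

// Conceptual, host-only analogue of the scaling-and-squaring step, on scalars.
#include <cmath>
#include <cstdio>

static double taylor9(double x)          // 1 + x + x^2/2! + ... + x^9/9!
{
    double term = 1.0, sum = 1.0;
    for (int k = 1; k <= 9; ++k)
    {
        term *= x / k;
        sum += term;
    }
    return sum;
}

static double exp_scaled_squared(double x)
{
    // scale the argument down until it is small (threshold here is illustrative;
    // the device code picks scaler from the matrix norm instead)
    int s = 0;
    double scale = 1.0;
    while (std::fabs(x) / scale > 0.5) { scale *= 2.0; ++s; }

    double y = taylor9(x / scale);       // polynomial of the scaled argument
    for (int i = 0; i < s; ++i)          // square back s times
        y = y * y;
    return y;
}

int main()
{
    printf("exp(3.7): library %.12f, scaled-and-squared %.12f\n",
           std::exp(3.7), exp_scaled_squared(3.7));
    return 0;
}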
a7c53ef1359f534001c7a94dc91db70d96ecf5b1.cu
#include <f/device/device_assert/cuda_assert.hpp> #include <f/device/device_assert/cublas_assert.hpp> #include <f/device/device_assert/kernel_assert.hpp> #include <cuda_runtime.h> #include <cublas_v2.h> #include <cuComplex.h> #include <math_functions.h> #if 1 //should call with Dznrm2<<<1,128>>>(...) __global__ void Dznrm2( unsigned long m, double2 *dA, double *dxnorm ) { unsigned long i = threadIdx.x; __shared__ double x[128]; double lsum = 0.0; for( unsigned long j = i; j < m; j += 128 ) { double const re = dA[j].x; double const im = dA[j].y; lsum += re*re + im*im; } x[i] = lsum; __syncthreads(); if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads(); if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads(); if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads(); if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads(); if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads(); if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads(); if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads(); if ( i == 0 ) *dxnorm = sqrt(x[0]); } __global__ void Dasum( unsigned long m, double2 *dA, double *dxnorm ) { unsigned long i = threadIdx.x; __shared__ double x[128]; double lsum = 0.0; for( unsigned long j = i; j < m; j += 128 ) { double const re = dA[j].x; double const im = dA[j].y; lsum += sqrt(re*re + im*im); } x[i] = lsum; __syncthreads(); if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads(); if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads(); if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads(); if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads(); if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads(); if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads(); if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads(); if ( i == 0 ) *dxnorm = x[0]; } #endif #if 0 __global__ void Dznrm2( unsigned long int n, double2* x, double* the_norm ) { __shared__ double sSum[512]; double res = 0.0; double2* lastX = x + n; x += threadIdx.x + blockIdx.x*512; unsigned long const blockOffset = gridDim.x*512; while ( x < lastX ) { double R = (*x).x; double I = (*x).y; res += R * R + I * I; x += blockOffset; } if (threadIdx.x >= 32) sSum[threadIdx.x] = res; __syncthreads(); if (threadIdx.x < 32) for ( unsigned long i=1; i < 16; ++i ) res += sSum[i*32 + threadIdx.x]; __syncthreads(); if (threadIdx.x < 32) { double* vsSum = sSum; vsSum[threadIdx.x] = res; if (threadIdx.x < 16) vsSum[threadIdx.x] += vsSum[threadIdx.x + 16]; __syncthreads(); if (threadIdx.x < 8) vsSum[threadIdx.x] += vsSum[threadIdx.x + 8]; __syncthreads(); if (threadIdx.x < 4) vsSum[threadIdx.x] += vsSum[threadIdx.x + 4]; __syncthreads(); if (threadIdx.x < 2) vsSum[threadIdx.x] += vsSum[threadIdx.x + 2]; __syncthreads(); if (threadIdx.x == 0) *the_norm = sqrt( vsSum[0] + vsSum[1] ); } } #endif //should call with Zscale<<<1, 128>>>(...); __global__ void Zscal( unsigned long m, double real, double2* dA ) { const int i = threadIdx.x; for( unsigned long j = i; j < m; j += 128 ) { dA[j].x *= real; dA[j].y *= real; } } __global__ //<<<((dim+15)/16,(dim+15)/16), (16,16)>>> void Zgemm( double2* P, double2* M, double2* N, unsigned long dim, double alpha ) { typedef double value_type; typedef double2 complex_type; typedef unsigned long size_type; __shared__ value_type _M[16][17]; __shared__ value_type _m[16][17]; __shared__ value_type _N[16][17]; __shared__ value_type _n[16][17]; const size_type bx = blockIdx.x; const size_type by = blockIdx.y; const size_type tx = threadIdx.x; const size_type ty = threadIdx.y; const size_type row = by * 16 + ty; const size_type col = bx * 16 + tx; const size_type iter_n = (dim+15)/16; 
value_type R = 0.0; value_type I = 0.0; for ( size_type i = 0; i != iter_n; ++i ) { if ( i * 16 + tx < dim && row < dim ) { _M[ty][tx] = (*( M + row * dim + i * 16 + tx )).x; _m[ty][tx] = (*( M + row * dim + i * 16 + tx )).y; } else { _M[ty][tx] = 0.0; _m[ty][tx] = 0.0; } if ( i * 16 + ty < dim && col < dim ) { _N[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).x; _n[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).y; } else { _N[ty][tx] = 0.0; _n[ty][tx] = 0.0; } __syncthreads(); #pragma unroll for ( size_type j = 0; j != 16; ++j ) { R += _M[ty][j] * _N[j][tx] - _m[ty][j] * _n[j][tx]; I += _M[ty][j] * _n[j][tx] + _m[ty][j] * _N[j][tx]; } __syncthreads(); } if ( row < dim && col < dim ) { (*( P + row * dim + col )).x = alpha * R; (*( P + row * dim + col )).y = alpha * I; } } __global__ void //<<<1,128>>> Zcopy( unsigned long dims, double2* src, double2* dst ) { unsigned long const i = threadIdx.x; for( unsigned long j = i; j < dims; j += 128 ) { (*(dst+j)).x = (*(src+j)).x; (*(dst+j)).y = (*(src+j)).y; } } __global__ void//<<<1, 128>>> Zaxpy( unsigned long dims, double real, double imag, double2* dst, double2* src ) // dst += (real,imag) * src { unsigned long const i = threadIdx.x; double R = 0.0; double I = 0.0; for( unsigned long j = i; j < dims; j += 128 ) { R = (*(src+j)).x; I = (*(src+j)).y; (*(dst+j)).x += real * R - imag * I; (*(dst+j)).y += real * I + imag * R; } } __global__ void compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim ) { int const row_index = threadIdx.x; for ( unsigned long col_index = 0; col_index != dim; ++col_index ) { unsigned long a_offset = row_index * dim + col_index; unsigned long const ug_index = *(ar+a_offset); //*(a+a_offset) = make_cuDoubleComplex( *(ug+ug_index+ug_index), *(ug+ug_index+ug_index+1) ); *(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)) / 100.0, thickness *( *(ug+ug_index+ug_index)) / 100.0 ); } //*(a+row_index*dim+row_index) = make_cuDoubleComplex( *(diag+row_index), 0.0 ); *(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness *( *(diag+row_index) ) / 100.0 ); } __global__ void extract_intensity_diff( double2* s, double* I_exp, double* I_diff, unsigned long dim, unsigned long column_index ) { int const I_offset = threadIdx.x; int const S_offset = column_index + threadIdx.x * dim; double const norm = cuCabs(*(s+S_offset)); *(I_diff+I_offset) = *(I_exp+I_offset) - norm * norm; } __global__ void extract_intensity_diff_with_offset( double2* s, double* I_exp, double* I_diff, unsigned long dim, unsigned long column_index, double ac_offset, double dc_offset ) { int const I_offset = threadIdx.x; int const S_offset = column_index + threadIdx.x * dim; double const norm = cuCabs(*(s+S_offset)); *(I_diff+I_offset) = *(I_exp+I_offset) - norm * norm * ac_offset - dc_offset; } __global__ void sum_diag( double2* a, unsigned long dim, double real, double imag ) { int const index = threadIdx.x; int const offset = index * dim + index; *(a+offset) = make_cuDoubleComplex( cuCreal(*(a+offset))+real, cuCimag(*(a+offset))+imag ); } /* * Input/Output: * ** ug[M] * ar[n][n] * diag[n] ==>> I_diff[n] ** thickness * dim -- n * I_exp[n] ** column_index * * cache: * a_[n][n] -- p2p3 * a^2_[n][n] -- s * a^3_[n][n] -- s_ * P1[n][n] * P2[n][n] * P3[n][n] * * 1) compose A * 2) scale to A_ * 3) compute A_^2 A_^3 * 4) compute (P1) (P2) (P3) * 5) square back * 6) extract one column */ __global__ void make_individual_pattern_intensity_diff( double* cuda_ug, unsigned long* cuda_ar, 
double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long max_dim, unsigned long tilt_size ) { unsigned long const tilt_index = blockDim.x * blockIdx.x + threadIdx.x; if ( tilt_index >= tilt_size ) return; unsigned long const dim = *(cuda_dim + tilt_index); double* ug = cuda_ug; unsigned long* ar = cuda_ar + tilt_index * max_dim * max_dim; double* diag = cuda_diag + tilt_index * max_dim; double* I_exp = cuda_I_exp + tilt_index * max_dim; double* I_diff = cuda_I_diff + tilt_index * max_dim; double2* cache = cuda_cache + 6 * tilt_index * max_dim * max_dim; unsigned long dimdim = dim*dim; //cache should be of size 6*N^2 double2* a_ = cache; double2* aa_ = a_ + dimdim; double2* aaa_ = aa_ + dimdim; double2* p1 = aaa_ + dimdim; double2* p2 = p1 + dimdim; double2* p3 = p2 + dimdim; //reuse memory in latter steps, when a_, aa_ and aaa_ are idle //double2* p2p3 = a_; double2* p2p3 = aaa_; double2* s = aa_; double2* s_ = aaa_; //1) kernel_assert( (compose_a<<<1, dim>>>( ug, ar, diag, thickness, a_, dim )) ); cuda_assert( cudaDeviceSynchronize() ); //2) //TODO double* the_norm = (double*)aa_; kernel_assert( (Dznrm2<<<1,128>>>( dimdim, a_, the_norm )) ); //kernel_assert( (Dasum<<<1,128>>>( dimdim, a_, the_norm )) ); cuda_assert( cudaDeviceSynchronize() ); //double const ratio = (*the_norm) * 53.71920351148152; double const ratio = (*the_norm) / 5.371920351148152; unsigned long const scaler = ratio < 1.0 ? 0 : ceil(log2(ratio)); unsigned long const scaling_factor = 1 << scaler; double const scale = scaling_factor; kernel_assert( (Zscal<<<1, 128>>>( dimdim, 1.0/scale, a_ )) ); //a_ /= scale cuda_assert( cudaDeviceSynchronize() ); //3) dim3 const mm_grids( (dim+15)/16, (dim+15)/16 ); dim3 const mm_threads( 16, 16 ); kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( aa_, a_, a_, dim, 1.0 )) ); cuda_assert( cudaDeviceSynchronize() ); kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( aaa_, aa_, a_, dim, 1.0 )) ); cuda_assert( cudaDeviceSynchronize() ); //4) /* * Maple: * Digits := 25 * evalf(solve(_Z^9+9*_Z^8+72*_Z^7+504*_Z^6+3024*_Z^5+15120*_Z^4+60480*_Z^3+181440*_Z^2+362880*_Z+362880 = 0)) * Returns: * 2.697333461536989227389605+5.184162062649414177834087*I, //c1 * -.3810698456631129990312942+4.384644533145397950369203*I, //c2 * -2.110839800302654737498705+3.089910928725500922777702*I, //c3 * -3.038648072936697089212469+1.586801195758838328803868*I, //c4 * -3.333551485269048803294274, //c5 * -3.038648072936697089212469-1.586801195758838328803868*I, //c6 * -2.110839800302654737498705-3.089910928725500922777702*I, //c7 * -.3810698456631129990312942-4.384644533145397950369203*I, //c8 * 2.697333461536989227389605-5.184162062649414177834087*I //c9 * * expand((x-c1)*(x-c2)*(x-c3)) >> p1 ( p1_c ) * x^3-.205423815571221490859606*x^2-(12.65871752452031305098099*I)*x^2-58.21460179641193947200471*x-(3.189848964212376356715960*I)*x-19.71085376106750328141397+94.20645646169128946503649*I * * expand((x-c4)*(x-c5)*(x-c6)) >> p2 ( p2_c ) * x^3+9.410847631142442981719212*x^2+39.17363072664900708597702-6.123261017392618755198919*10^(-24)*I+32.01029973951970099352671*x+(4.*10^(-24)*I)*x * * expand((x-c7)*(x-c8)*(x-c9)) >> p3 ( p3_c ) * x^3-.205423815571221490859601*x^2+(12.65871752452031305098099*I)*x^2-58.21460179641193947200470*x+(3.18984896421237635671600*I)*x-19.71085376106750328141404-94.20645646169128946503646*I * * expand((x-c1)*(x-c2)*(x-c3)*(x-c4)*(x-c5)*(x-c6)*(x-c7)*(x-c8)*(x-c9)) * 
3.628800000000000000000003*10^5-1.365022562699469279472268*10^(-19)*I+3.628800000000000000000003*10^5*x+x^9+9.00000000000000000000000*x^8+72.00000000000000000000006*x^7+503.9999999999999999999995*x^6+3024.000000000000000000002*x^5+15120.00000000000000000000*x^4+60479.99999999999999999995*x^3+1.814400000000000000000001*10^5*x^2-(5.*10^(-22)*I)*x^6-(1.*10^(-20)*I)*x^4-(1.0*10^(-19)*I)*x^3+(2.*10^(-24)*I)*x^8-(3.0*10^(-19)*I)*x^2-(7.*10^(-21)*I)*x^5-(4.*10^(-19)*I)*x+(2.*10^(-23)*I)*x^7 */ //4 - p1) kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p1 )) ); cuda_assert( cudaDeviceSynchronize() ); kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -0.205423815571221490859606, -12.65871752452031305098099, p1, aa_ )) ); cuda_assert( cudaDeviceSynchronize() ); kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -58.21460179641193947200471, -3.189848964212376356715960, p1, a_ )) ); cuda_assert( cudaDeviceSynchronize() ); kernel_assert( (sum_diag<<<1,dim>>>( p1, dim, -19.71085376106750328141397, 94.20645646169128946503649 )) ); cuda_assert( cudaDeviceSynchronize() ); //4 - p2) kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p2 )) ); cuda_assert( cudaDeviceSynchronize() ); kernel_assert( (Zaxpy<<<1,128>>>( dimdim, 9.410847631142442981719212, 0.0, p2, aa_ )) ); cuda_assert( cudaDeviceSynchronize() ); kernel_assert( (Zaxpy<<<1,128>>>( dimdim, 32.01029973951970099352671, 0.0, p2, a_ )) ); cuda_assert( cudaDeviceSynchronize() ); kernel_assert( (sum_diag<<<1,dim>>>( p2, dim, 39.17363072664900708597702, 0.0 )) ); cuda_assert( cudaDeviceSynchronize() ); //4 - p3) kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p3 )) ); cuda_assert( cudaDeviceSynchronize() ); kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -0.205423815571221490859601, 12.65871752452031305098099, p3, aa_ )) ); cuda_assert( cudaDeviceSynchronize() ); kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -58.21460179641193947200470, 3.18984896421237635671600, p3, a_ )) ); cuda_assert( cudaDeviceSynchronize() ); kernel_assert( (sum_diag<<<1,dim>>>( p3, dim, -19.71085376106750328141404, -94.20645646169128946503646 )) ); cuda_assert( cudaDeviceSynchronize() ); //4 - s) // s = 1/602.39521910453439454428( p1 * ( 1/602.39521910453439454428 * p2 * p3 ) ) = (p1 p2 p3)/362880 kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( p2p3, p2, p3, dim, 0.0016600397351866578333 )) ); cuda_assert( cudaDeviceSynchronize() ); kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( s, p1, p2p3, dim, 0.0016600397351866578333 )) ); cuda_assert( cudaDeviceSynchronize() ); //5) if ( scaler != 0 ) { for ( unsigned long index = 0; index != scaler; ++index ) { kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( s_, s, s, dim, 1.0 )) ); cuda_assert( cudaDeviceSynchronize() ); double2* tmp = s_; s_ = s; s = tmp; } } //6) //kernel_assert( (extract_intensity_diff<<<1,dim>>>( s, I_exp, I_diff, dim, column_index )) ); double const ac_offset = cuda_ug[0]; double const dc_offset = cuda_ug[1]; kernel_assert( (extract_intensity_diff_with_offset<<<1,dim>>>( s, I_exp, I_diff, dim, column_index, ac_offset, dc_offset )) ); cuda_assert( cudaDeviceSynchronize() ); } void make_pattern_intensity_diff( double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long tilt_size, unsigned long max_dim ) { unsigned long const threads = 64; unsigned long const grids = (tilt_size + threads - 1)/threads; kernel_assert( ( make_individual_pattern_intensity_diff<<<grids, threads>>>( cuda_ug, cuda_ar, cuda_diag, 
thickness, cuda_dim, cuda_I_exp, cuda_I_diff, column_index, cuda_cache, max_dim, tilt_size ) ) ); cuda_assert( cudaDeviceSynchronize() ); }
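Zgemm in both versions stages 16x16 operand tiles in __shared__ arrays declared [16][17]; the extra column is a common padding idiom that keeps column-wise (strided) tile accesses from mapping onto a single shared-memory bank. A stripped-down, real-valued sketch of the same tiling scheme follows; the kernel name is illustrative, and it is launched with the same ((dim+15)/16, (dim+15)/16) x (16,16) configuration noted in the source.

// Real-valued version of the tiling scheme used by Zgemm above (sketch only).
#define TILE 16

__global__ void tiled_gemm_real(double *P, const double *M, const double *N,
                                unsigned long dim, double alpha)
{
    __shared__ double sM[TILE][TILE + 1];   // +1 column: padding against bank conflicts
    __shared__ double sN[TILE][TILE + 1];

    unsigned long row = blockIdx.y * TILE + threadIdx.y;
    unsigned long col = blockIdx.x * TILE + threadIdx.x;
    double acc = 0.0;

    for (unsigned long t = 0; t < (dim + TILE - 1) / TILE; ++t)
    {
        // stage one tile of each operand, zero-padding past the matrix edge
        sM[threadIdx.y][threadIdx.x] =
            (row < dim && t * TILE + threadIdx.x < dim) ? M[row * dim + t * TILE + threadIdx.x] : 0.0;
        sN[threadIdx.y][threadIdx.x] =
            (col < dim && t * TILE + threadIdx.y < dim) ? N[(t * TILE + threadIdx.y) * dim + col] : 0.0;
        __syncthreads();

        for (int k = 0; k < TILE; ++k)
            acc += sM[threadIdx.y][k] * sN[k][threadIdx.x];
        __syncthreads();
    }

    if (row < dim && col < dim)
        P[row * dim + col] = alpha * acc;
}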
aae92c0116108b8128df79d8e2992690e485e961.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdint.h> #include <string.h> #include "md5.cu" char* hash(const char* h_str) { char* d_str; unsigned char* h_res = (unsigned char*)malloc(sizeof(unsigned char)*(32 + 1)); unsigned char* d_res; hipMalloc((void**)&d_str, sizeof(char) * strlen(h_str)); hipMalloc((void**)&d_res, sizeof(char) * 32); hipMemcpy(d_str, h_str, sizeof(char) * strlen(h_str), hipMemcpyHostToDevice); hipLaunchKernelGGL(( md5), dim3(1), dim3(1), 0, 0, d_str, (uint32_t)strlen(h_str), d_res); hipMemcpy(h_res, d_res, sizeof(unsigned char)*(32), hipMemcpyDeviceToHost); hipFree(d_str); hipFree(d_res); char* res = (char*)malloc(sizeof(char)*32); for (int i = 0; i < 16; i++) { sprintf(&res[i*2], "%2.2x", h_res[i]); } return res; } int run_test(const char* name, const char* result, const char* expected) { if (strcmp(expected, result) == 0) { printf("TEST PASSED: %s: expected %s, got %s\n", name, expected, result); return 1; } else { printf("TEST FAILED: %s: expected %s, got %s\n", name, expected, result); return 0; } } int main() { int passed = 0, failed = 0; run_test("md5(\"\")", hash(""), "d41d8cd98f00b204e9800998ecf8427e") ? passed++ : failed++; run_test("md5(\"a\")", hash("a"), "0cc175b9c0f1b6a831c399e269772661") ? passed++ : failed++; run_test("md5(\"abc\")", hash("abc"), "900150983cd24fb0d6963f7d28e17f72") ? passed++ : failed++; run_test("md5(\"message digest\")", hash("message digest"), "f96b697d7cb7938d525a2f31aaf161d0") ? passed++ : failed++; run_test("md5(\"abcdefghijklmnopqrstuvwxyz\")", \ hash("abcdefghijklmnopqrstuvwxyz"), \ "c3fcd3d76192e4007dfb496cca67e13b") ? passed++ : failed++; run_test("md5(\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789\")", \ hash("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"), \ "d174ab98d277d9f5a5611c2c9f419d9f") ? passed++ : failed++; run_test("md5(\"12345678901234567890123456789012345678901234567890123456789012345678901234567890\")", \ hash("12345678901234567890123456789012345678901234567890123456789012345678901234567890"), \ "57edf4a22be3c955ac49da2e2107b67a") ? passed++ : failed++; printf("Tests Passed: %i\n", passed); printf("Tests Failed: %i\n", failed); return failed; }
aae92c0116108b8128df79d8e2992690e485e961.cu
#include <cuda.h> #include <stdint.h> #include <string.h> #include "md5.cu" char* hash(const char* h_str) { char* d_str; unsigned char* h_res = (unsigned char*)malloc(sizeof(unsigned char)*(32 + 1)); unsigned char* d_res; cudaMalloc((void**)&d_str, sizeof(char) * strlen(h_str)); cudaMalloc((void**)&d_res, sizeof(char) * 32); cudaMemcpy(d_str, h_str, sizeof(char) * strlen(h_str), cudaMemcpyHostToDevice); md5<<<1, 1>>>(d_str, (uint32_t)strlen(h_str), d_res); cudaMemcpy(h_res, d_res, sizeof(unsigned char)*(32), cudaMemcpyDeviceToHost); cudaFree(d_str); cudaFree(d_res); char* res = (char*)malloc(sizeof(char)*32); for (int i = 0; i < 16; i++) { sprintf(&res[i*2], "%2.2x", h_res[i]); } return res; } int run_test(const char* name, const char* result, const char* expected) { if (strcmp(expected, result) == 0) { printf("TEST PASSED: %s: expected %s, got %s\n", name, expected, result); return 1; } else { printf("TEST FAILED: %s: expected %s, got %s\n", name, expected, result); return 0; } } int main() { int passed = 0, failed = 0; run_test("md5(\"\")", hash(""), "d41d8cd98f00b204e9800998ecf8427e") ? passed++ : failed++; run_test("md5(\"a\")", hash("a"), "0cc175b9c0f1b6a831c399e269772661") ? passed++ : failed++; run_test("md5(\"abc\")", hash("abc"), "900150983cd24fb0d6963f7d28e17f72") ? passed++ : failed++; run_test("md5(\"message digest\")", hash("message digest"), "f96b697d7cb7938d525a2f31aaf161d0") ? passed++ : failed++; run_test("md5(\"abcdefghijklmnopqrstuvwxyz\")", \ hash("abcdefghijklmnopqrstuvwxyz"), \ "c3fcd3d76192e4007dfb496cca67e13b") ? passed++ : failed++; run_test("md5(\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789\")", \ hash("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"), \ "d174ab98d277d9f5a5611c2c9f419d9f") ? passed++ : failed++; run_test("md5(\"12345678901234567890123456789012345678901234567890123456789012345678901234567890\")", \ hash("12345678901234567890123456789012345678901234567890123456789012345678901234567890"), \ "57edf4a22be3c955ac49da2e2107b67a") ? passed++ : failed++; printf("Tests Passed: %i\n", passed); printf("Tests Failed: %i\n", failed); return failed; }
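hash() above formats 16 digest bytes as 32 hex characters, but res is malloc'd with only 32 bytes; the final sprintf writes its terminating NUL at res[32], one byte past the buffer, and run_test's strcmp depends on that terminator. Below is a hedged sketch of a variant that sizes and terminates the buffer explicitly; it assumes the md5 kernel signature used above (from the included md5.cu), and hash_fixed is an illustrative name.

// Sketch of a hash() variant with an explicitly sized, NUL-terminated output.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda_runtime.h>

char* hash_fixed(const char* h_str)
{
    const size_t len = strlen(h_str);
    char* d_str = NULL;
    unsigned char h_digest[16];                 // raw 128-bit digest
    unsigned char* d_digest = NULL;

    cudaMalloc((void**)&d_str, len);
    cudaMalloc((void**)&d_digest, 32);          // device scratch sized as in the original wrapper
    cudaMemcpy(d_str, h_str, len, cudaMemcpyHostToDevice);

    md5<<<1, 1>>>(d_str, (uint32_t)len, d_digest);   // assumed signature, as called above

    cudaMemcpy(h_digest, d_digest, 16, cudaMemcpyDeviceToHost);
    cudaFree(d_str);
    cudaFree(d_digest);

    char* res = (char*)malloc(33);              // 32 hex characters + terminating NUL
    for (int i = 0; i < 16; i++)
        sprintf(&res[i * 2], "%02x", h_digest[i]);
    res[32] = '\0';                             // explicit, even though sprintf already wrote it
    return res;
}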
7011457402acb1e544d0fba5c45a5a87b55f4613.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h> // for EXIT_SUCCESS

const int N = 1;
const int blocksize = 1;

__global__ void kernelFunc() {
}

int main() {
    int b[N] = {4};
    int *bd;
    const int isize = N*sizeof(int);

    printf("%i", *b);

    hipMalloc( (void**)&bd, isize );
    hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );

    // Allocate a big chunk of memory as a trigger
    const int cnst = 1000000000;
    int *d_ptr;
    hipMalloc(&d_ptr, cnst * sizeof(int));

    dim3 dimBlock( blocksize, 1 );
    dim3 dimGrid( 1, 1 );
    hipLaunchKernelGGL(( kernelFunc), dim3(dimGrid), dim3(dimBlock), 0, 0, );
    hipMemcpy( b, bd, isize, hipMemcpyDeviceToHost );
    hipFree( bd );
    hipFree( d_ptr );

    printf(" %i\n", *b);
    return EXIT_SUCCESS;
}
7011457402acb1e544d0fba5c45a5a87b55f4613.cu
#include <stdio.h>
#include <stdlib.h> // for EXIT_SUCCESS

const int N = 1;
const int blocksize = 1;

__global__ void kernelFunc() {
}

int main() {
    int b[N] = {4};
    int *bd;
    const int isize = N*sizeof(int);

    printf("%i", *b);

    cudaMalloc( (void**)&bd, isize );
    cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );

    // Allocate a big chunk of memory as a trigger
    const int cnst = 1000000000;
    int *d_ptr;
    cudaMalloc(&d_ptr, cnst * sizeof(int));

    dim3 dimBlock( blocksize, 1 );
    dim3 dimGrid( 1, 1 );
    kernelFunc<<<dimGrid, dimBlock>>>();
    cudaMemcpy( b, bd, isize, cudaMemcpyDeviceToHost );
    cudaFree( bd );
    cudaFree( d_ptr );

    printf(" %i\n", *b);
    return EXIT_SUCCESS;
}
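The program above requests roughly 4 GB (cnst * sizeof(int)) "as a trigger" without checking whether the allocation succeeded. A hedged sketch of one way to check the request and report free/total device memory around it (try_big_alloc is an illustrative name):

// Sketch: verify the large "trigger" allocation and report device memory.
#include <stdio.h>
#include <cuda_runtime.h>

int try_big_alloc(size_t bytes)
{
    size_t free_b = 0, total_b = 0;
    cudaMemGetInfo(&free_b, &total_b);
    printf("before: %zu MiB free of %zu MiB\n", free_b >> 20, total_b >> 20);

    int *d_ptr = NULL;
    cudaError_t err = cudaMalloc(&d_ptr, bytes);
    if (err != cudaSuccess) {
        printf("cudaMalloc(%zu bytes) failed: %s\n", bytes, cudaGetErrorString(err));
        return 1;
    }

    cudaMemGetInfo(&free_b, &total_b);
    printf("after:  %zu MiB free of %zu MiB\n", free_b >> 20, total_b >> 20);
    cudaFree(d_ptr);
    return 0;
}
// e.g. try_big_alloc(1000000000ull * sizeof(int)); // the ~4 GB request made above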
d5e0df01f76c607f22442de1cfa37d6644427973.hip
// !!! This is a file automatically generated by hipify!!! // EMD approximation module (based on auction algorithm) // author: Minghua Liu #include <stdio.h> #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <iostream> #include <hip/hip_runtime.h> __device__ __forceinline__ float atomicMax(float *address, float val) { int ret = __float_as_int(*address); while(val > __int_as_float(ret)) { int old = ret; if((ret = atomicCAS((int *)address, old, __float_as_int(val))) == old) break; } return __int_as_float(ret); } __global__ void clear(int b, int * cnt_tmp, int * unass_cnt) { for (int i = threadIdx.x; i < b; i += blockDim.x) { cnt_tmp[i] = 0; unass_cnt[i] = 0; } } __global__ void calc_unass_cnt(int b, int n, int * assignment, int * unass_cnt) { // count the number of unassigned points in each batch const int BLOCK_SIZE = 1024; __shared__ int scan_array[BLOCK_SIZE]; for (int i = blockIdx.x; i < b; i += gridDim.x) { scan_array[threadIdx.x] = assignment[i * n + blockIdx.y * BLOCK_SIZE + threadIdx.x] == -1 ? 1 : 0; __syncthreads(); int stride = 1; while(stride <= BLOCK_SIZE / 2) { int index = (threadIdx.x + 1) * stride * 2 - 1; if(index < BLOCK_SIZE) scan_array[index] += scan_array[index - stride]; stride = stride * 2; __syncthreads(); } __syncthreads(); if (threadIdx.x == BLOCK_SIZE - 1) { atomicAdd(&unass_cnt[i], scan_array[threadIdx.x]); } __syncthreads(); } } __global__ void calc_unass_cnt_sum(int b, int * unass_cnt, int * unass_cnt_sum) { // count the cumulative sum over over unass_cnt const int BLOCK_SIZE = 512; // batch_size <= 512 __shared__ int scan_array[BLOCK_SIZE]; scan_array[threadIdx.x] = unass_cnt[threadIdx.x]; __syncthreads(); int stride = 1; while(stride <= BLOCK_SIZE / 2) { int index = (threadIdx.x + 1) * stride * 2 - 1; if(index < BLOCK_SIZE) scan_array[index] += scan_array[index - stride]; stride = stride * 2; __syncthreads(); } __syncthreads(); stride = BLOCK_SIZE / 4; while(stride > 0) { int index = (threadIdx.x + 1) * stride * 2 - 1; if((index + stride) < BLOCK_SIZE) scan_array[index + stride] += scan_array[index]; stride = stride / 2; __syncthreads(); } __syncthreads(); //printf("%d\n", unass_cnt_sum[b - 1]); unass_cnt_sum[threadIdx.x] = scan_array[threadIdx.x]; } __global__ void calc_unass_idx(int b, int n, int * assignment, int * unass_idx, int * unass_cnt, int * unass_cnt_sum, int * cnt_tmp) { // list all the unassigned points for (int i = blockIdx.x; i < b; i += gridDim.x) { if (assignment[i * n + blockIdx.y * 1024 + threadIdx.x] == -1) { int idx = atomicAdd(&cnt_tmp[i], 1); unass_idx[unass_cnt_sum[i] - unass_cnt[i] + idx] = blockIdx.y * 1024 + threadIdx.x; } } } __global__ void Bid(int b, int n, const float * xyz1, const float * xyz2, float eps, int * assignment, int * assignment_inv, float * price, int * bid, float * bid_increments, float * max_increments, int * unass_cnt, int * unass_cnt_sum, int * unass_idx) { const int batch = 2048, block_size = 1024, block_cnt = n / 1024; __shared__ float xyz2_buf[batch * 3]; __shared__ float price_buf[batch]; __shared__ float best_buf[block_size]; __shared__ float better_buf[block_size]; __shared__ int best_i_buf[block_size]; for (int i = blockIdx.x; i < b; i += gridDim.x) { int _unass_cnt = unass_cnt[i]; if (_unass_cnt == 0) continue; int _unass_cnt_sum = unass_cnt_sum[i]; int unass_per_block = (_unass_cnt + block_cnt - 1) / block_cnt; int thread_per_unass = block_size / unass_per_block; int unass_this_block = max(min(_unass_cnt - (int) blockIdx.y * unass_per_block, unass_per_block), 0); float x1, y1, z1, best = -1e9, 
better = -1e9; int best_i = -1, _unass_id = -1, thread_in_unass; if (threadIdx.x < thread_per_unass * unass_this_block) { _unass_id = unass_per_block * blockIdx.y + threadIdx.x / thread_per_unass + _unass_cnt_sum - _unass_cnt; _unass_id = unass_idx[_unass_id]; thread_in_unass = threadIdx.x % thread_per_unass; x1 = xyz1[(i * n + _unass_id) * 3 + 0]; y1 = xyz1[(i * n + _unass_id) * 3 + 1]; z1 = xyz1[(i * n + _unass_id) * 3 + 2]; } for (int k2 = 0; k2 < n; k2 += batch) { int end_k = min(n, k2 + batch) - k2; for (int j = threadIdx.x; j < end_k * 3; j += blockDim.x) { xyz2_buf[j] = xyz2[(i * n + k2) * 3 + j]; } for (int j = threadIdx.x; j < end_k; j += blockDim.x) { price_buf[j] = price[i * n + k2 + j]; } __syncthreads(); if (_unass_id != -1) { int delta = (end_k + thread_per_unass - 1) / thread_per_unass; int l = thread_in_unass * delta; int r = min((thread_in_unass + 1) * delta, end_k); for (int k = l; k < r; k++) //if (!last || assignment_inv[i * n + k + k2] == -1) { float x2 = xyz2_buf[k * 3 + 0] - x1; float y2 = xyz2_buf[k * 3 + 1] - y1; float z2 = xyz2_buf[k * 3 + 2] - z1; // the coordinates of points should be normalized to [0, 1] float d = 3.0 - sqrtf(x2 * x2 + y2 * y2 + z2 * z2) - price_buf[k]; if (d > best) { better = best; best = d; best_i = k + k2; } else if (d > better) { better = d; } } } __syncthreads(); } best_buf[threadIdx.x] = best; better_buf[threadIdx.x] = better; best_i_buf[threadIdx.x] = best_i; __syncthreads(); if (_unass_id != -1 && thread_in_unass == 0) { for (int j = threadIdx.x + 1; j < threadIdx.x + thread_per_unass; j++) { if (best_buf[j] > best) { better = max(best, better_buf[j]); best = best_buf[j]; best_i = best_i_buf[j]; } else better = max(better, best_buf[j]); } bid[i * n + _unass_id] = best_i; bid_increments[i * n + _unass_id] = best - better + eps; atomicMax(&max_increments[i * n + best_i], best - better + eps); } } } __global__ void GetMax(int b, int n, int * assignment, int * bid, float * bid_increments, float * max_increments, int * max_idx) { for (int i = blockIdx.x; i < b; i += gridDim.x) { int j = threadIdx.x + blockIdx.y * blockDim.x; if (assignment[i * n + j] == -1) { int bid_id = bid[i * n + j]; float bid_inc = bid_increments[i * n + j]; float max_inc = max_increments[i * n + bid_id]; if (bid_inc - 1e-6 <= max_inc && max_inc <= bid_inc + 1e-6) { max_idx[i * n + bid_id] = j; } } } } __global__ void Assign(int b, int n, int * assignment, int * assignment_inv, float * price, int * bid, float * bid_increments, float * max_increments, int * max_idx, bool last) { for (int i = blockIdx.x; i < b; i += gridDim.x) { int j = threadIdx.x + blockIdx.y * blockDim.x; if (assignment[i * n + j] == -1) { int bid_id = bid[i * n + j]; if (last || max_idx[i * n + bid_id] == j) { float bid_inc = bid_increments[i * n + j]; int ass_inv = assignment_inv[i * n + bid_id]; if (!last && ass_inv != -1) { assignment[i * n + ass_inv] = -1; } assignment_inv[i * n + bid_id] = j; assignment[i * n + j] = bid_id; price[i * n + bid_id] += bid_inc; max_increments[i * n + bid_id] = -1e9; } } } } __global__ void CalcDist(int b, int n, float * xyz1, float * xyz2, float * dist, int * assignment) { for (int i = blockIdx.x; i < b; i += gridDim.x) { int j = threadIdx.x + blockIdx.y * blockDim.x; int k = assignment[i * n + j]; float deltax = xyz1[(i * n + j) * 3 + 0] - xyz2[(i * n + k) * 3 + 0]; float deltay = xyz1[(i * n + j) * 3 + 1] - xyz2[(i * n + k) * 3 + 1]; float deltaz = xyz1[(i * n + j) * 3 + 2] - xyz2[(i * n + k) * 3 + 2]; dist[i * n + j] = deltax * deltax + deltay * deltay + deltaz * 
deltaz; } } int emd_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist, at::Tensor assignment, at::Tensor price, at::Tensor assignment_inv, at::Tensor bid, at::Tensor bid_increments, at::Tensor max_increments, at::Tensor unass_idx, at::Tensor unass_cnt, at::Tensor unass_cnt_sum, at::Tensor cnt_tmp, at::Tensor max_idx, float eps, int iters) { const auto batch_size = xyz1.size(0); const auto n = xyz1.size(1); //num_points point cloud A const auto m = xyz2.size(1); //num_points point cloud B if (n != m) { printf("Input Error! The two point clouds should have the same size.\n"); return -1; } if (batch_size > 512) { printf("Input Error! The batch size should be less than 512.\n"); return -1; } if (n % 1024 != 0) { printf("Input Error! The size of the point clouds should be a multiple of 1024.\n"); return -1; } //hipEvent_t start,stop; //hipEventCreate(&start); //hipEventCreate(&stop); //hipEventRecord(start); //int iters = 50; for (int i = 0; i < iters; i++) { hipLaunchKernelGGL(( clear), dim3(1), dim3(batch_size), 0, 0, batch_size, cnt_tmp.data<int>(), unass_cnt.data<int>()); hipLaunchKernelGGL(( calc_unass_cnt), dim3(dim3(batch_size, n / 1024, 1)), dim3(1024), 0, 0, batch_size, n, assignment.data<int>(), unass_cnt.data<int>()); hipLaunchKernelGGL(( calc_unass_cnt_sum), dim3(1), dim3(batch_size), 0, 0, batch_size, unass_cnt.data<int>(), unass_cnt_sum.data<int>()); hipLaunchKernelGGL(( calc_unass_idx), dim3(dim3(batch_size, n / 1024, 1)), dim3(1024), 0, 0, batch_size, n, assignment.data<int>(), unass_idx.data<int>(), unass_cnt.data<int>(), unass_cnt_sum.data<int>(), cnt_tmp.data<int>()); hipLaunchKernelGGL(( Bid), dim3(dim3(batch_size, n / 1024, 1)), dim3(1024), 0, 0, batch_size, n, xyz1.data<float>(), xyz2.data<float>(), eps, assignment.data<int>(), assignment_inv.data<int>(), price.data<float>(), bid.data<int>(), bid_increments.data<float>(), max_increments.data<float>(), unass_cnt.data<int>(), unass_cnt_sum.data<int>(), unass_idx.data<int>()); hipLaunchKernelGGL(( GetMax), dim3(dim3(batch_size, n / 1024, 1)), dim3(1024), 0, 0, batch_size, n, assignment.data<int>(), bid.data<int>(), bid_increments.data<float>(), max_increments.data<float>(), max_idx.data<int>()); hipLaunchKernelGGL(( Assign), dim3(dim3(batch_size, n / 1024, 1)), dim3(1024), 0, 0, batch_size, n, assignment.data<int>(), assignment_inv.data<int>(), price.data<float>(), bid.data<int>(), bid_increments.data<float>(), max_increments.data<float>(), max_idx.data<int>(), i == iters - 1); } hipLaunchKernelGGL(( CalcDist), dim3(dim3(batch_size, n / 1024, 1)), dim3(1024), 0, 0, batch_size, n, xyz1.data<float>(), xyz2.data<float>(), dist.data<float>(), assignment.data<int>()); //hipEventRecord(stop); //hipEventSynchronize(stop); //float elapsedTime; //hipEventElapsedTime(&elapsedTime,start,stop); //printf("%lf\n", elapsedTime); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in nnd Output: %s\n", hipGetErrorString(err)); return 0; } return 1; } __global__ void NmDistanceGradKernel(int b, int n, const float * xyz1, const float * xyz2, const float * grad_dist, const int * idx, float * grad_xyz){ for (int i = blockIdx.x; i < b; i += gridDim.x) { for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) { float x1 = xyz1[(i * n + j) * 3 + 0]; float y1 = xyz1[(i * n + j) * 3 + 1]; float z1 = xyz1[(i * n + j) * 3 + 2]; int j2 = idx[i * n + j]; float x2 = xyz2[(i * n + j2) * 3 + 0]; float y2 = xyz2[(i * n + j2) * 3 + 1]; float z2 = xyz2[(i * n + j2) * 3 + 2]; float g = 
grad_dist[i * n + j] * 2; atomicAdd(&(grad_xyz[(i * n + j) * 3 + 0]), g * (x1 - x2)); atomicAdd(&(grad_xyz[(i * n + j) * 3 + 1]), g * (y1 - y2)); atomicAdd(&(grad_xyz[(i * n + j) * 3 + 2]), g * (z1 - z2)); } } } int emd_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz, at::Tensor graddist, at::Tensor idx){ const auto batch_size = xyz1.size(0); const auto n = xyz1.size(1); const auto m = xyz2.size(1); hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(batch_size, n / 1024, 1)), dim3(1024), 0, 0, batch_size, n, xyz1.data<float>(), xyz2.data<float>(), graddist.data<float>(), idx.data<int>(), gradxyz.data<float>()); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in nnd get grad: %s\n", hipGetErrorString(err)); return 0; } return 1; }
d5e0df01f76c607f22442de1cfa37d6644427973.cu
// EMD approximation module (based on auction algorithm) // author: Minghua Liu #include <stdio.h> #include <ATen/ATen.h> #include <cuda.h> #include <iostream> #include <cuda_runtime.h> __device__ __forceinline__ float atomicMax(float *address, float val) { int ret = __float_as_int(*address); while(val > __int_as_float(ret)) { int old = ret; if((ret = atomicCAS((int *)address, old, __float_as_int(val))) == old) break; } return __int_as_float(ret); } __global__ void clear(int b, int * cnt_tmp, int * unass_cnt) { for (int i = threadIdx.x; i < b; i += blockDim.x) { cnt_tmp[i] = 0; unass_cnt[i] = 0; } } __global__ void calc_unass_cnt(int b, int n, int * assignment, int * unass_cnt) { // count the number of unassigned points in each batch const int BLOCK_SIZE = 1024; __shared__ int scan_array[BLOCK_SIZE]; for (int i = blockIdx.x; i < b; i += gridDim.x) { scan_array[threadIdx.x] = assignment[i * n + blockIdx.y * BLOCK_SIZE + threadIdx.x] == -1 ? 1 : 0; __syncthreads(); int stride = 1; while(stride <= BLOCK_SIZE / 2) { int index = (threadIdx.x + 1) * stride * 2 - 1; if(index < BLOCK_SIZE) scan_array[index] += scan_array[index - stride]; stride = stride * 2; __syncthreads(); } __syncthreads(); if (threadIdx.x == BLOCK_SIZE - 1) { atomicAdd(&unass_cnt[i], scan_array[threadIdx.x]); } __syncthreads(); } } __global__ void calc_unass_cnt_sum(int b, int * unass_cnt, int * unass_cnt_sum) { // count the cumulative sum over over unass_cnt const int BLOCK_SIZE = 512; // batch_size <= 512 __shared__ int scan_array[BLOCK_SIZE]; scan_array[threadIdx.x] = unass_cnt[threadIdx.x]; __syncthreads(); int stride = 1; while(stride <= BLOCK_SIZE / 2) { int index = (threadIdx.x + 1) * stride * 2 - 1; if(index < BLOCK_SIZE) scan_array[index] += scan_array[index - stride]; stride = stride * 2; __syncthreads(); } __syncthreads(); stride = BLOCK_SIZE / 4; while(stride > 0) { int index = (threadIdx.x + 1) * stride * 2 - 1; if((index + stride) < BLOCK_SIZE) scan_array[index + stride] += scan_array[index]; stride = stride / 2; __syncthreads(); } __syncthreads(); //printf("%d\n", unass_cnt_sum[b - 1]); unass_cnt_sum[threadIdx.x] = scan_array[threadIdx.x]; } __global__ void calc_unass_idx(int b, int n, int * assignment, int * unass_idx, int * unass_cnt, int * unass_cnt_sum, int * cnt_tmp) { // list all the unassigned points for (int i = blockIdx.x; i < b; i += gridDim.x) { if (assignment[i * n + blockIdx.y * 1024 + threadIdx.x] == -1) { int idx = atomicAdd(&cnt_tmp[i], 1); unass_idx[unass_cnt_sum[i] - unass_cnt[i] + idx] = blockIdx.y * 1024 + threadIdx.x; } } } __global__ void Bid(int b, int n, const float * xyz1, const float * xyz2, float eps, int * assignment, int * assignment_inv, float * price, int * bid, float * bid_increments, float * max_increments, int * unass_cnt, int * unass_cnt_sum, int * unass_idx) { const int batch = 2048, block_size = 1024, block_cnt = n / 1024; __shared__ float xyz2_buf[batch * 3]; __shared__ float price_buf[batch]; __shared__ float best_buf[block_size]; __shared__ float better_buf[block_size]; __shared__ int best_i_buf[block_size]; for (int i = blockIdx.x; i < b; i += gridDim.x) { int _unass_cnt = unass_cnt[i]; if (_unass_cnt == 0) continue; int _unass_cnt_sum = unass_cnt_sum[i]; int unass_per_block = (_unass_cnt + block_cnt - 1) / block_cnt; int thread_per_unass = block_size / unass_per_block; int unass_this_block = max(min(_unass_cnt - (int) blockIdx.y * unass_per_block, unass_per_block), 0); float x1, y1, z1, best = -1e9, better = -1e9; int best_i = -1, _unass_id = -1, thread_in_unass; if 
(threadIdx.x < thread_per_unass * unass_this_block) { _unass_id = unass_per_block * blockIdx.y + threadIdx.x / thread_per_unass + _unass_cnt_sum - _unass_cnt; _unass_id = unass_idx[_unass_id]; thread_in_unass = threadIdx.x % thread_per_unass; x1 = xyz1[(i * n + _unass_id) * 3 + 0]; y1 = xyz1[(i * n + _unass_id) * 3 + 1]; z1 = xyz1[(i * n + _unass_id) * 3 + 2]; } for (int k2 = 0; k2 < n; k2 += batch) { int end_k = min(n, k2 + batch) - k2; for (int j = threadIdx.x; j < end_k * 3; j += blockDim.x) { xyz2_buf[j] = xyz2[(i * n + k2) * 3 + j]; } for (int j = threadIdx.x; j < end_k; j += blockDim.x) { price_buf[j] = price[i * n + k2 + j]; } __syncthreads(); if (_unass_id != -1) { int delta = (end_k + thread_per_unass - 1) / thread_per_unass; int l = thread_in_unass * delta; int r = min((thread_in_unass + 1) * delta, end_k); for (int k = l; k < r; k++) //if (!last || assignment_inv[i * n + k + k2] == -1) { float x2 = xyz2_buf[k * 3 + 0] - x1; float y2 = xyz2_buf[k * 3 + 1] - y1; float z2 = xyz2_buf[k * 3 + 2] - z1; // the coordinates of points should be normalized to [0, 1] float d = 3.0 - sqrtf(x2 * x2 + y2 * y2 + z2 * z2) - price_buf[k]; if (d > best) { better = best; best = d; best_i = k + k2; } else if (d > better) { better = d; } } } __syncthreads(); } best_buf[threadIdx.x] = best; better_buf[threadIdx.x] = better; best_i_buf[threadIdx.x] = best_i; __syncthreads(); if (_unass_id != -1 && thread_in_unass == 0) { for (int j = threadIdx.x + 1; j < threadIdx.x + thread_per_unass; j++) { if (best_buf[j] > best) { better = max(best, better_buf[j]); best = best_buf[j]; best_i = best_i_buf[j]; } else better = max(better, best_buf[j]); } bid[i * n + _unass_id] = best_i; bid_increments[i * n + _unass_id] = best - better + eps; atomicMax(&max_increments[i * n + best_i], best - better + eps); } } } __global__ void GetMax(int b, int n, int * assignment, int * bid, float * bid_increments, float * max_increments, int * max_idx) { for (int i = blockIdx.x; i < b; i += gridDim.x) { int j = threadIdx.x + blockIdx.y * blockDim.x; if (assignment[i * n + j] == -1) { int bid_id = bid[i * n + j]; float bid_inc = bid_increments[i * n + j]; float max_inc = max_increments[i * n + bid_id]; if (bid_inc - 1e-6 <= max_inc && max_inc <= bid_inc + 1e-6) { max_idx[i * n + bid_id] = j; } } } } __global__ void Assign(int b, int n, int * assignment, int * assignment_inv, float * price, int * bid, float * bid_increments, float * max_increments, int * max_idx, bool last) { for (int i = blockIdx.x; i < b; i += gridDim.x) { int j = threadIdx.x + blockIdx.y * blockDim.x; if (assignment[i * n + j] == -1) { int bid_id = bid[i * n + j]; if (last || max_idx[i * n + bid_id] == j) { float bid_inc = bid_increments[i * n + j]; int ass_inv = assignment_inv[i * n + bid_id]; if (!last && ass_inv != -1) { assignment[i * n + ass_inv] = -1; } assignment_inv[i * n + bid_id] = j; assignment[i * n + j] = bid_id; price[i * n + bid_id] += bid_inc; max_increments[i * n + bid_id] = -1e9; } } } } __global__ void CalcDist(int b, int n, float * xyz1, float * xyz2, float * dist, int * assignment) { for (int i = blockIdx.x; i < b; i += gridDim.x) { int j = threadIdx.x + blockIdx.y * blockDim.x; int k = assignment[i * n + j]; float deltax = xyz1[(i * n + j) * 3 + 0] - xyz2[(i * n + k) * 3 + 0]; float deltay = xyz1[(i * n + j) * 3 + 1] - xyz2[(i * n + k) * 3 + 1]; float deltaz = xyz1[(i * n + j) * 3 + 2] - xyz2[(i * n + k) * 3 + 2]; dist[i * n + j] = deltax * deltax + deltay * deltay + deltaz * deltaz; } } int emd_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, 
at::Tensor dist, at::Tensor assignment, at::Tensor price, at::Tensor assignment_inv, at::Tensor bid, at::Tensor bid_increments, at::Tensor max_increments, at::Tensor unass_idx, at::Tensor unass_cnt, at::Tensor unass_cnt_sum, at::Tensor cnt_tmp, at::Tensor max_idx, float eps, int iters) { const auto batch_size = xyz1.size(0); const auto n = xyz1.size(1); //num_points point cloud A const auto m = xyz2.size(1); //num_points point cloud B if (n != m) { printf("Input Error! The two point clouds should have the same size.\n"); return -1; } if (batch_size > 512) { printf("Input Error! The batch size should be less than 512.\n"); return -1; } if (n % 1024 != 0) { printf("Input Error! The size of the point clouds should be a multiple of 1024.\n"); return -1; } //cudaEvent_t start,stop; //cudaEventCreate(&start); //cudaEventCreate(&stop); //cudaEventRecord(start); //int iters = 50; for (int i = 0; i < iters; i++) { clear<<<1, batch_size>>>(batch_size, cnt_tmp.data<int>(), unass_cnt.data<int>()); calc_unass_cnt<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, assignment.data<int>(), unass_cnt.data<int>()); calc_unass_cnt_sum<<<1, batch_size>>>(batch_size, unass_cnt.data<int>(), unass_cnt_sum.data<int>()); calc_unass_idx<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, assignment.data<int>(), unass_idx.data<int>(), unass_cnt.data<int>(), unass_cnt_sum.data<int>(), cnt_tmp.data<int>()); Bid<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, xyz1.data<float>(), xyz2.data<float>(), eps, assignment.data<int>(), assignment_inv.data<int>(), price.data<float>(), bid.data<int>(), bid_increments.data<float>(), max_increments.data<float>(), unass_cnt.data<int>(), unass_cnt_sum.data<int>(), unass_idx.data<int>()); GetMax<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, assignment.data<int>(), bid.data<int>(), bid_increments.data<float>(), max_increments.data<float>(), max_idx.data<int>()); Assign<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, assignment.data<int>(), assignment_inv.data<int>(), price.data<float>(), bid.data<int>(), bid_increments.data<float>(), max_increments.data<float>(), max_idx.data<int>(), i == iters - 1); } CalcDist<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, xyz1.data<float>(), xyz2.data<float>(), dist.data<float>(), assignment.data<int>()); //cudaEventRecord(stop); //cudaEventSynchronize(stop); //float elapsedTime; //cudaEventElapsedTime(&elapsedTime,start,stop); //printf("%lf\n", elapsedTime); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in nnd Output: %s\n", cudaGetErrorString(err)); return 0; } return 1; } __global__ void NmDistanceGradKernel(int b, int n, const float * xyz1, const float * xyz2, const float * grad_dist, const int * idx, float * grad_xyz){ for (int i = blockIdx.x; i < b; i += gridDim.x) { for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) { float x1 = xyz1[(i * n + j) * 3 + 0]; float y1 = xyz1[(i * n + j) * 3 + 1]; float z1 = xyz1[(i * n + j) * 3 + 2]; int j2 = idx[i * n + j]; float x2 = xyz2[(i * n + j2) * 3 + 0]; float y2 = xyz2[(i * n + j2) * 3 + 1]; float z2 = xyz2[(i * n + j2) * 3 + 2]; float g = grad_dist[i * n + j] * 2; atomicAdd(&(grad_xyz[(i * n + j) * 3 + 0]), g * (x1 - x2)); atomicAdd(&(grad_xyz[(i * n + j) * 3 + 1]), g * (y1 - y2)); atomicAdd(&(grad_xyz[(i * n + j) * 3 + 2]), g * (z1 - z2)); } } } int emd_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz, at::Tensor graddist, at::Tensor idx){ const auto batch_size = 
xyz1.size(0); const auto n = xyz1.size(1); const auto m = xyz2.size(1); NmDistanceGradKernel<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, xyz1.data<float>(), xyz2.data<float>(), graddist.data<float>(), idx.data<int>(), gradxyz.data<float>()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in nnd get grad: %s\n", cudaGetErrorString(err)); return 0; } return 1; }
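// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file pair above; all names here
// are hypothetical): the Bid/GetMax/Assign kernels implement one sweep of the
// epsilon-scaling auction algorithm.  The single-threaded sketch below
// restates the bidding rule used inside Bid -- net benefit = value - price,
// bid increment = best - second_best + eps -- with a generic value array in
// place of the kernel's 3.0 - distance term.
static void auction_bid_sketch(int n, const float *value, const float *price,
                               float eps, int *best_obj, float *bid_increment)
{
    float best = -1e9f, better = -1e9f;   // best and second-best net benefit
    int best_idx = -1;
    for (int j = 0; j < n; j++) {
        float d = value[j] - price[j];    // net benefit of object j for this bidder
        if (d > best) { better = best; best = d; best_idx = j; }
        else if (d > better) { better = d; }
    }
    *best_obj = best_idx;                 // object the unassigned point bids on
    *bid_increment = best - better + eps; // same increment Bid writes to bid_increments
}
// ---------------------------------------------------------------------------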
90522fc954c0556ef40eccbf8d5186e4aaaf8f87.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * sphTartakovskyEtAl.cuh * * Author: Kamil Szewc ([email protected]) * Modified on: 26-09-2014 * */ #include <iostream> #include <thrust/device_vector.h> #include "../sph.h" #include "../hlp.h" #include "sphTartakovskyEtAl/sphTartakovskyEtAl.cuh" #include "general/calcNumberOfCells/calcNumberOfCells.cuh" #include "general/calcTimeStep/calcTimeStep.cuh" #include "general/renormalizePressure/renormalizePressure.cuh" #include "../methods/hashSortReorder.cuh" #include "../methods/copyParticles.cuh" #include "../errlog.h" void modelTartakovskyEtAl(int NOB, int TPB, thrust::device_vector<Particle>& pVector, Particle *pSort, uint *gridParticleHash, uint *gridParticleIndex, uint *cellStart, uint *cellEnd, Parameters *par, Parameters *parHost, real time) { STARTLOG("logs/models.log"); Particle* p = thrust::raw_pointer_cast(pVector.data()); calcNumberOfCells(pVector, par, parHost); calcTimeStep(pVector, par, parHost); static bool isConverted = false; if (isConverted == false) { std::cout << "Conversion..." << std::endl; hipLaunchKernelGGL(( calcDeformationSTEA) , dim3(NOB), dim3(TPB), 0, 0, p, par); HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcDeformationSTEA"); isConverted = true; } hashSortReorder(NOB, TPB, p, par, pSort, gridParticleHash, gridParticleIndex, cellStart, cellEnd, parHost->N); copyParticles << <NOB, TPB >> >(pSort, p, gridParticleIndex, true, par, parHost->N); hipLaunchKernelGGL(( calcDensitySTEA) , dim3(NOB), dim3(TPB), 0, 0, pSort, gridParticleIndex, cellStart, cellEnd, par); HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcDensitySTEA"); hipLaunchKernelGGL(( calcPressureSTEA) , dim3(NOB), dim3(TPB), 0, 0, pSort, par); HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcPressureSTEA"); hipLaunchKernelGGL(( calcInteractionSTEA) , dim3(NOB), dim3(TPB), 0, 0, pSort, gridParticleIndex, cellStart, cellEnd, par); HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcInteractionSTEA"); hipLaunchKernelGGL(( calcAdvectionSTEA) , dim3(NOB), dim3(TPB), 0, 0, pSort, par); HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcAdvectionSTEA"); copyParticles << <NOB, TPB >> >(p, pSort, gridParticleIndex, false, par, parHost->N); }
90522fc954c0556ef40eccbf8d5186e4aaaf8f87.cu
/* * sphTartakovskyEtAl.cuh * * Author: Kamil Szewc ([email protected]) * Modified on: 26-09-2014 * */ #include <iostream> #include <thrust/device_vector.h> #include "../sph.h" #include "../hlp.h" #include "sphTartakovskyEtAl/sphTartakovskyEtAl.cuh" #include "general/calcNumberOfCells/calcNumberOfCells.cuh" #include "general/calcTimeStep/calcTimeStep.cuh" #include "general/renormalizePressure/renormalizePressure.cuh" #include "../methods/hashSortReorder.cuh" #include "../methods/copyParticles.cuh" #include "../errlog.h" void modelTartakovskyEtAl(int NOB, int TPB, thrust::device_vector<Particle>& pVector, Particle *pSort, uint *gridParticleHash, uint *gridParticleIndex, uint *cellStart, uint *cellEnd, Parameters *par, Parameters *parHost, real time) { STARTLOG("logs/models.log"); Particle* p = thrust::raw_pointer_cast(pVector.data()); calcNumberOfCells(pVector, par, parHost); calcTimeStep(pVector, par, parHost); static bool isConverted = false; if (isConverted == false) { std::cout << "Conversion..." << std::endl; calcDeformationSTEA <<<NOB, TPB>>>(p, par); HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcDeformationSTEA"); isConverted = true; } hashSortReorder(NOB, TPB, p, par, pSort, gridParticleHash, gridParticleIndex, cellStart, cellEnd, parHost->N); copyParticles << <NOB, TPB >> >(pSort, p, gridParticleIndex, true, par, parHost->N); calcDensitySTEA <<<NOB, TPB>>>(pSort, gridParticleIndex, cellStart, cellEnd, par); HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcDensitySTEA"); calcPressureSTEA <<<NOB, TPB>>>(pSort, par); HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcPressureSTEA"); calcInteractionSTEA <<<NOB, TPB>>>(pSort, gridParticleIndex, cellStart, cellEnd, par); HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcInteractionSTEA"); calcAdvectionSTEA <<<NOB, TPB>>>(pSort, par); HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcAdvectionSTEA"); copyParticles << <NOB, TPB >> >(p, pSort, gridParticleIndex, false, par, parHost->N); }
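// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file pair above; the kernel and
// all launch parameters below are placeholders): the .hip/.cu pairs in this
// dump differ mainly in the kernel-launch syntax emitted by the conversion
// tool, as the hypothetical example shows.
__global__ void axpy_sketch(float a, const float *x, float *y, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];    // y <- a*x + y
}
// CUDA launch form:
//   axpy_sketch<<<grid, block, 0, stream>>>(a, x, y, n);
// Hipified equivalent, matching the style seen above:
//   hipLaunchKernelGGL(( axpy_sketch), dim3(grid), dim3(block), 0, stream, a, x, y, n);
// ---------------------------------------------------------------------------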
c9c4250bfb9eb41e5a4c61553307ac8f90222183.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* A simple program demonstrating trivial use of global memory atomic * device functions (atomic*() functions). */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #ifdef _WIN32 # define WINDOWS_LEAN_AND_MEAN # define NOMINMAX # include <windows.h> #endif // Includes CUDA #include <hip/hip_runtime.h> // Utilities and timing functions #include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h // CUDA helper functions #include <helper_cuda.h> // helper functions for CUDA error check // Includes, kernels #include "simpleAtomicIntrinsics_kernel.cuh" const char *sampleName = "simpleAtomicIntrinsics"; //////////////////////////////////////////////////////////////////////////////// // Auto-Verification Code bool testResult = true; //////////////////////////////////////////////////////////////////////////////// // Declaration, forward void runTest(int argc, char **argv); extern "C" bool computeGold(int *gpuData, const int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { printf("%s starting...\n", sampleName); runTest(argc, argv); printf("%s completed, returned %s\n", sampleName, testResult ? "OK" : "ERROR!"); exit(testResult ? EXIT_SUCCESS : EXIT_FAILURE); } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char **argv) { hipDeviceProp_t deviceProp; deviceProp.major = 0; deviceProp.minor = 0; int dev = 0; // This will pick the best possible CUDA capable device dev = findCudaDevice(argc, (const char **)argv); checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev)); // Statistics about the GPU device printf("> GPU device has %d Multi-Processors, " "SM %d.%d compute capabilities\n\n", deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor); int version = (deviceProp.major * 0x10 + deviceProp.minor); if (version < 0x11) { printf("%s: requires a minimum CUDA compute 1.1 capability, waiving testing.\n", sampleName); exit(EXIT_WAIVED); } StopWatchInterface *timer; sdkCreateTimer(&timer); sdkStartTimer(&timer); unsigned int numThreads = 256; unsigned int numBlocks = 64; unsigned int numData = 11; unsigned int memSize = sizeof(int) * numData; //allocate mem for the result on host side int *hOData = (int *) malloc(memSize); //initialize the memory for (unsigned int i = 0; i < numData; i++) hOData[i] = 0; //To make the AND and XOR tests generate something other than 0... 
hOData[8] = hOData[10] = 0xff; // allocate device memory for result int *dOData; checkCudaErrors(hipMalloc((void **) &dOData, memSize)); // copy host memory to device to initialize to zero checkCudaErrors(hipMemcpy(dOData, hOData, memSize, hipMemcpyHostToDevice)); // execute the kernel hipLaunchKernelGGL(( testKernel), dim3(numBlocks), dim3(numThreads), 0, 0, dOData); getLastCudaError("Kernel execution failed"); //Copy result from device to host checkCudaErrors(hipMemcpy(hOData, dOData, memSize, hipMemcpyDeviceToHost)); sdkStopTimer(&timer); printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); // Compute reference solution testResult = computeGold(hOData, numThreads * numBlocks); // Cleanup memory free(hOData); checkCudaErrors(hipFree(dOData)); }
c9c4250bfb9eb41e5a4c61553307ac8f90222183.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* A simple program demonstrating trivial use of global memory atomic * device functions (atomic*() functions). */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #ifdef _WIN32 # define WINDOWS_LEAN_AND_MEAN # define NOMINMAX # include <windows.h> #endif // Includes CUDA #include <cuda_runtime.h> // Utilities and timing functions #include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h // CUDA helper functions #include <helper_cuda.h> // helper functions for CUDA error check // Includes, kernels #include "simpleAtomicIntrinsics_kernel.cuh" const char *sampleName = "simpleAtomicIntrinsics"; //////////////////////////////////////////////////////////////////////////////// // Auto-Verification Code bool testResult = true; //////////////////////////////////////////////////////////////////////////////// // Declaration, forward void runTest(int argc, char **argv); extern "C" bool computeGold(int *gpuData, const int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { printf("%s starting...\n", sampleName); runTest(argc, argv); printf("%s completed, returned %s\n", sampleName, testResult ? "OK" : "ERROR!"); exit(testResult ? EXIT_SUCCESS : EXIT_FAILURE); } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char **argv) { cudaDeviceProp deviceProp; deviceProp.major = 0; deviceProp.minor = 0; int dev = 0; // This will pick the best possible CUDA capable device dev = findCudaDevice(argc, (const char **)argv); checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev)); // Statistics about the GPU device printf("> GPU device has %d Multi-Processors, " "SM %d.%d compute capabilities\n\n", deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor); int version = (deviceProp.major * 0x10 + deviceProp.minor); if (version < 0x11) { printf("%s: requires a minimum CUDA compute 1.1 capability, waiving testing.\n", sampleName); exit(EXIT_WAIVED); } StopWatchInterface *timer; sdkCreateTimer(&timer); sdkStartTimer(&timer); unsigned int numThreads = 256; unsigned int numBlocks = 64; unsigned int numData = 11; unsigned int memSize = sizeof(int) * numData; //allocate mem for the result on host side int *hOData = (int *) malloc(memSize); //initialize the memory for (unsigned int i = 0; i < numData; i++) hOData[i] = 0; //To make the AND and XOR tests generate something other than 0... 
hOData[8] = hOData[10] = 0xff; // allocate device memory for result int *dOData; checkCudaErrors(cudaMalloc((void **) &dOData, memSize)); // copy host memory to device to initialize to zero checkCudaErrors(cudaMemcpy(dOData, hOData, memSize, cudaMemcpyHostToDevice)); // execute the kernel testKernel<<<numBlocks, numThreads>>>(dOData); getLastCudaError("Kernel execution failed"); //Copy result from device to host checkCudaErrors(cudaMemcpy(hOData, dOData, memSize, cudaMemcpyDeviceToHost)); sdkStopTimer(&timer); printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); // Compute reference solution testResult = computeGold(hOData, numThreads * numBlocks); // Cleanup memory free(hOData); checkCudaErrors(cudaFree(dOData)); }
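// ---------------------------------------------------------------------------
// Editor's sketch (assumption, not part of the original file pair above): the
// kernel and the CPU reference computeGold() used above live in
// simpleAtomicIntrinsics_kernel.cuh and a separate gold source, neither of
// which appears in this dump.  As a generic illustration only: if every one of
// totalThreads launched threads performed atomicAdd(&data[0], 1), the
// host-side verification would reduce to the comparison below.
static bool check_atomic_add_sketch(const int *hostData, int totalThreads)
{
    // each thread contributed exactly 1, so the reduced value must equal totalThreads
    return hostData[0] == totalThreads;
}
// ---------------------------------------------------------------------------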
1911f0c3b309f93c7ff289cb095cd752cc66bc62.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdlib> #include <cstdio> #include <string> #include <iostream> #include <color_spinor_field.h> #include <clover_field.h> // these control the Wilson-type actions #ifdef GPU_WILSON_DIRAC //#define DIRECT_ACCESS_LINK //#define DIRECT_ACCESS_WILSON_SPINOR //#define DIRECT_ACCESS_WILSON_ACCUM //#define DIRECT_ACCESS_WILSON_INTER //#define DIRECT_ACCESS_WILSON_PACK_SPINOR //#define DIRECT_ACCESS_CLOVER #endif // GPU_WILSON_DIRAC #include <quda_internal.h> #include <dslash_quda.h> #include <sys/time.h> #include <blas_quda.h> #include <inline_ptx.h> namespace quda { namespace twisted { #undef GPU_STAGGERED_DIRAC #include <dslash_constants.h> #include <dslash_textures.h> #include <dslash_index.cuh> // Enable shared memory dslash for Fermi architecture //#define SHARED_WILSON_DSLASH //#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access #ifdef GPU_TWISTED_MASS_DIRAC #include <tm_dslash_def.h> // Twisted Mass kernels #endif #ifndef DSLASH_SHARED_FLOATS_PER_THREAD #define DSLASH_SHARED_FLOATS_PER_THREAD 0 #endif #include <dslash_quda.cuh> } // end namespace twisted // declare the dslash events #include <dslash_events.cuh> using namespace twisted; #ifdef GPU_TWISTED_MASS_DIRAC template <typename sFloat, typename gFloat> class TwistedDslashCuda : public SharedDslashCuda { private: const QudaTwistDslashType dslashType; double a, b, c, d; protected: unsigned int sharedBytesPerThread() const { if (dslashParam.kernel_type == INTERIOR_KERNEL) { int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float)); return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size; } else { return 0; } } public: TwistedDslashCuda(cudaColorSpinorField *out, const GaugeField &gauge, const cudaColorSpinorField *in, const cudaColorSpinorField *x, const QudaTwistDslashType dslashType, const double kappa, const double mu, const double epsilon, const double k, const int parity, const int dagger, const int *commOverride) : SharedDslashCuda(out, in, x, gauge, parity, dagger, commOverride), dslashType(dslashType) { a = kappa; b = mu; c = epsilon; d = k; if (dslashType == QUDA_NONDEG_DSLASH) errorQuda("Invalid dslashType for twisted-mass Dslash"); dslashParam.twist_a = (dslashType == QUDA_DEG_TWIST_INV_DSLASH) ? kappa : 0.0; dslashParam.twist_b = (dslashType == QUDA_DEG_TWIST_INV_DSLASH) ? 
mu : 0.0; dslashParam.a = kappa; dslashParam.a_f = kappa; dslashParam.b = mu; dslashParam.b_f = mu; dslashParam.fl_stride = in->VolumeCB(); } virtual ~TwistedDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); } TuneKey tuneKey() const { TuneKey key = DslashCuda::tuneKey(); switch(dslashType){ case QUDA_DEG_TWIST_INV_DSLASH: strcat(key.aux,",TwistInvDslash"); break; case QUDA_DEG_DSLASH_TWIST_INV: strcat(key.aux,","); break; case QUDA_DEG_DSLASH_TWIST_XPAY: strcat(key.aux,",DslashTwist"); break; default: errorQuda("Unsupported twisted-dslash type %d", dslashType); } return key; } void apply(const hipStream_t &stream) { #ifdef SHARED_WILSON_DSLASH if (dslashParam.kernel_type == EXTERIOR_KERNEL_X) errorQuda("Shared dslash does not yet support X-dimension partitioning"); #endif #ifndef USE_TEXTURE_OBJECTS if (dslashParam.kernel_type == INTERIOR_KERNEL) bindSpinorTex<sFloat>(in, out, x); #endif // USE_TEXTURE_OBJECTS TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); setParam(); dslashParam.block[0] = tp.aux.x; dslashParam.block[1] = tp.aux.y; dslashParam.block[2] = tp.aux.z; dslashParam.block[3] = tp.aux.w; for (int i=0; i<4; i++) dslashParam.grid[i] = ( (i==0 ? 2 : 1) * in->X(i)) / dslashParam.block[i]; switch(dslashType){ case QUDA_DEG_TWIST_INV_DSLASH: DSLASH(twistedMassTwistInvDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam); break; case QUDA_DEG_DSLASH_TWIST_INV: DSLASH(twistedMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam); break; case QUDA_DEG_DSLASH_TWIST_XPAY: DSLASH(twistedMassDslashTwist, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam); break; default: errorQuda("Invalid twisted mass dslash type"); } } long long flops() const { int twisted_flops = 48; long long flops = DslashCuda::flops(); switch(dslashParam.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: case KERNEL_POLICY: // twisted mass flops are done in the interior kernel flops += twisted_flops * in->VolumeCB(); break; } return flops; } }; #endif // GPU_TWISTED_MASS_DIRAC #include <dslash_policy.cuh> void twistedMassDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const cudaColorSpinorField *in, const int parity, const int dagger, const cudaColorSpinorField *x, const QudaTwistDslashType type, const double &kappa, const double &mu, const double &epsilon, const double &k, const int *commOverride, TimeProfile &profile) { #ifdef GPU_TWISTED_MASS_DIRAC const_cast<cudaColorSpinorField*>(in)->createComms(1); if (type == QUDA_DEG_TWIST_INV_DSLASH) setKernelPackT(true); DslashCuda *dslash = nullptr; if (in->Precision() == QUDA_DOUBLE_PRECISION) { dslash = new TwistedDslashCuda<double2,double2>(out, gauge, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } else if (in->Precision() == QUDA_SINGLE_PRECISION) { dslash = new TwistedDslashCuda<float4,float4>(out, gauge, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } else if (in->Precision() == QUDA_HALF_PRECISION) { dslash = new TwistedDslashCuda<short4,short4>(out, gauge, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } DslashPolicyTune dslash_policy(*dslash, const_cast<cudaColorSpinorField*>(in), in->Volume(), in->GhostFace(), profile); dslash_policy.apply(0); delete dslash; if (type == QUDA_DEG_TWIST_INV_DSLASH) setKernelPackT(false); #else errorQuda("Twisted mass dslash has not been built"); #endif } }
1911f0c3b309f93c7ff289cb095cd752cc66bc62.cu
#include <cstdlib> #include <cstdio> #include <string> #include <iostream> #include <color_spinor_field.h> #include <clover_field.h> // these control the Wilson-type actions #ifdef GPU_WILSON_DIRAC //#define DIRECT_ACCESS_LINK //#define DIRECT_ACCESS_WILSON_SPINOR //#define DIRECT_ACCESS_WILSON_ACCUM //#define DIRECT_ACCESS_WILSON_INTER //#define DIRECT_ACCESS_WILSON_PACK_SPINOR //#define DIRECT_ACCESS_CLOVER #endif // GPU_WILSON_DIRAC #include <quda_internal.h> #include <dslash_quda.h> #include <sys/time.h> #include <blas_quda.h> #include <inline_ptx.h> namespace quda { namespace twisted { #undef GPU_STAGGERED_DIRAC #include <dslash_constants.h> #include <dslash_textures.h> #include <dslash_index.cuh> // Enable shared memory dslash for Fermi architecture //#define SHARED_WILSON_DSLASH //#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access #ifdef GPU_TWISTED_MASS_DIRAC #include <tm_dslash_def.h> // Twisted Mass kernels #endif #ifndef DSLASH_SHARED_FLOATS_PER_THREAD #define DSLASH_SHARED_FLOATS_PER_THREAD 0 #endif #include <dslash_quda.cuh> } // end namespace twisted // declare the dslash events #include <dslash_events.cuh> using namespace twisted; #ifdef GPU_TWISTED_MASS_DIRAC template <typename sFloat, typename gFloat> class TwistedDslashCuda : public SharedDslashCuda { private: const QudaTwistDslashType dslashType; double a, b, c, d; protected: unsigned int sharedBytesPerThread() const { if (dslashParam.kernel_type == INTERIOR_KERNEL) { int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float)); return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size; } else { return 0; } } public: TwistedDslashCuda(cudaColorSpinorField *out, const GaugeField &gauge, const cudaColorSpinorField *in, const cudaColorSpinorField *x, const QudaTwistDslashType dslashType, const double kappa, const double mu, const double epsilon, const double k, const int parity, const int dagger, const int *commOverride) : SharedDslashCuda(out, in, x, gauge, parity, dagger, commOverride), dslashType(dslashType) { a = kappa; b = mu; c = epsilon; d = k; if (dslashType == QUDA_NONDEG_DSLASH) errorQuda("Invalid dslashType for twisted-mass Dslash"); dslashParam.twist_a = (dslashType == QUDA_DEG_TWIST_INV_DSLASH) ? kappa : 0.0; dslashParam.twist_b = (dslashType == QUDA_DEG_TWIST_INV_DSLASH) ? mu : 0.0; dslashParam.a = kappa; dslashParam.a_f = kappa; dslashParam.b = mu; dslashParam.b_f = mu; dslashParam.fl_stride = in->VolumeCB(); } virtual ~TwistedDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); } TuneKey tuneKey() const { TuneKey key = DslashCuda::tuneKey(); switch(dslashType){ case QUDA_DEG_TWIST_INV_DSLASH: strcat(key.aux,",TwistInvDslash"); break; case QUDA_DEG_DSLASH_TWIST_INV: strcat(key.aux,","); break; case QUDA_DEG_DSLASH_TWIST_XPAY: strcat(key.aux,",DslashTwist"); break; default: errorQuda("Unsupported twisted-dslash type %d", dslashType); } return key; } void apply(const cudaStream_t &stream) { #ifdef SHARED_WILSON_DSLASH if (dslashParam.kernel_type == EXTERIOR_KERNEL_X) errorQuda("Shared dslash does not yet support X-dimension partitioning"); #endif #ifndef USE_TEXTURE_OBJECTS if (dslashParam.kernel_type == INTERIOR_KERNEL) bindSpinorTex<sFloat>(in, out, x); #endif // USE_TEXTURE_OBJECTS TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); setParam(); dslashParam.block[0] = tp.aux.x; dslashParam.block[1] = tp.aux.y; dslashParam.block[2] = tp.aux.z; dslashParam.block[3] = tp.aux.w; for (int i=0; i<4; i++) dslashParam.grid[i] = ( (i==0 ? 
2 : 1) * in->X(i)) / dslashParam.block[i]; switch(dslashType){ case QUDA_DEG_TWIST_INV_DSLASH: DSLASH(twistedMassTwistInvDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam); break; case QUDA_DEG_DSLASH_TWIST_INV: DSLASH(twistedMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam); break; case QUDA_DEG_DSLASH_TWIST_XPAY: DSLASH(twistedMassDslashTwist, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam); break; default: errorQuda("Invalid twisted mass dslash type"); } } long long flops() const { int twisted_flops = 48; long long flops = DslashCuda::flops(); switch(dslashParam.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: case KERNEL_POLICY: // twisted mass flops are done in the interior kernel flops += twisted_flops * in->VolumeCB(); break; } return flops; } }; #endif // GPU_TWISTED_MASS_DIRAC #include <dslash_policy.cuh> void twistedMassDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const cudaColorSpinorField *in, const int parity, const int dagger, const cudaColorSpinorField *x, const QudaTwistDslashType type, const double &kappa, const double &mu, const double &epsilon, const double &k, const int *commOverride, TimeProfile &profile) { #ifdef GPU_TWISTED_MASS_DIRAC const_cast<cudaColorSpinorField*>(in)->createComms(1); if (type == QUDA_DEG_TWIST_INV_DSLASH) setKernelPackT(true); DslashCuda *dslash = nullptr; if (in->Precision() == QUDA_DOUBLE_PRECISION) { dslash = new TwistedDslashCuda<double2,double2>(out, gauge, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } else if (in->Precision() == QUDA_SINGLE_PRECISION) { dslash = new TwistedDslashCuda<float4,float4>(out, gauge, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } else if (in->Precision() == QUDA_HALF_PRECISION) { dslash = new TwistedDslashCuda<short4,short4>(out, gauge, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } DslashPolicyTune dslash_policy(*dslash, const_cast<cudaColorSpinorField*>(in), in->Volume(), in->GhostFace(), profile); dslash_policy.apply(0); delete dslash; if (type == QUDA_DEG_TWIST_INV_DSLASH) setKernelPackT(false); #else errorQuda("Twisted mass dslash has not been built"); #endif } }
e162d3575e5afc0061f7933035639e08022169a2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Integrator.hpp" #include <iostream> #include <algorithm> #include <math.h> #include <hiprand/hiprand.h> #include "RandomVariable.hpp" #include "kernel.hpp" #include "config.hpp" #include "cutil.hpp" MonteCarloIntegrator::MonteCarloIntegrator() { this->gen = new hiprandGenerator_t(); hiprandCreateGenerator(this->gen, HIPRAND_RNG_PSEUDO_MTGP32); hiprandSetPseudoRandomGeneratorSeed(*(this->gen), 1234ULL); } MonteCarloIntegrator::~MonteCarloIntegrator() { hiprandDestroyGenerator(*(this->gen)); } //! // Compute DUST for 2 time series. // float MonteCarloIntegrator::distance(const TimeSeries &ts1, const TimeSeries &ts2, int ts_length) { size_t tuples_size = sizeof(float) * ts_length * TUPLE_SIZE; size_t dusts_size = sizeof(float) * ts_length; // copy ts1, ts2 to memory float *tuples, *dusts, *tuples_GPU, *dusts_GPU; tuples = (float*)malloc(tuples_size); dusts = (float*)malloc(dusts_size); checkCudaErrors(hipMalloc((void**)&tuples_GPU, tuples_size)); checkCudaErrors(hipMalloc((void**)&dusts_GPU, dusts_size)); int idx = 0; for (int i = 0; i < ts_length; i++) { RandomVariable x = ts1.at(i); RandomVariable y = ts2.at(i); tuples[idx] = static_cast<float>(x.distribution); tuples[idx + 1] = x.observation; tuples[idx + 2] = x.stddev; tuples[idx + 3] = static_cast<float>(y.distribution); tuples[idx + 4] = y.observation; tuples[idx + 5] = y.stddev; idx += TUPLE_SIZE; } checkCudaErrors(hipMemcpy(tuples_GPU, tuples, tuples_size, hipMemcpyHostToDevice)); // generate uniform random number on samples_GPU float *samples_GPU; checkCudaErrors(hipMalloc((void**)&samples_GPU, sizeof(float) * INTEGRATION_SAMPLES * ts_length * 3)); hiprandGenerateUniform(*(this->gen), samples_GPU, INTEGRATION_SAMPLES * ts_length * 3); // call kernel hipLaunchKernelGGL(( g_distance_kernel), dim3(ts_length), dim3(TPB) , 0, 0, tuples_GPU, samples_GPU, dusts_GPU); checkCudaErrors(hipMemcpy(dusts, dusts_GPU, dusts_size, hipMemcpyDeviceToHost)); float dust_sum = 0; for (int i = 0; i < ts_length; i++) { dust_sum += dusts[i]; // std::cout << "### " << sqrt(dusts[i]) << std::endl; } //std::cout << "ts length: " << ts_length << std::endl; free(tuples); free(dusts); checkCudaErrors(hipFree(tuples_GPU)); checkCudaErrors(hipFree(dusts_GPU)); checkCudaErrors(hipFree(samples_GPU)); return sqrt(dust_sum); } // Match 1 ts to all ts in tsc. // Repeat Integrator::distance for all combination. int MonteCarloIntegrator::match_naive(const TimeSeries &ts, const TimeSeriesCollection &tsc) { // Determine the length of time series. unsigned int ts_length = min(ts.length(), tsc.length_min()); for (int i = 0; i < tsc.sequences.size(); i++) { ts_length = min(ts_length, tsc.sequences[i].length()); } float DUST_min; float i_min; for (int i = 0; i < tsc.sequences.size(); i++) { float DUST = this->distance(ts, tsc.sequences[i], ts_length); if (DUST < DUST_min || i == 0) { DUST_min = DUST; i_min = i; } } std::cout << "matched : " << ts_length << std::endl; std::cout << "\t index: " << i_min << ", distance : " << DUST_min << std::endl; return i_min; } // Match 1 ts to all ts in tsc // Optimized version. int MonteCarloIntegrator::match(const TimeSeries &ts, const TimeSeriesCollection &tsc) { this->prepare_match(ts, tsc); // Generate uniform random number on samples_GPU. 
size_t samples_num = INTEGRATION_SAMPLES * ts_length * ts_num * 3; checkCudaErrors(hipMalloc((void**)&(samples_D), sizeof(float) * samples_num)); hiprandGenerateUniform(*(this->gen), samples_D, samples_num); hipLaunchKernelGGL(( g_match), dim3(ts_length), dim3(TPB) , 0, 0, ts_D, tsc_D, dusts_D, ts_length, ts_num, this->samples_D); int i_min; float DUST_min; this->finish_match(&i_min, &DUST_min); checkCudaErrors(hipFree(samples_D)); std::cout << "matched : " << ts_length << std::endl; std::cout << "\t index: " << i_min << ", distance: " << DUST_min << std::endl; return i_min; }
e162d3575e5afc0061f7933035639e08022169a2.cu
#include "Integrator.hpp" #include <iostream> #include <algorithm> #include <math.h> #include <curand.h> #include "RandomVariable.hpp" #include "kernel.hpp" #include "config.hpp" #include "cutil.hpp" MonteCarloIntegrator::MonteCarloIntegrator() { this->gen = new curandGenerator_t(); curandCreateGenerator(this->gen, CURAND_RNG_PSEUDO_MTGP32); curandSetPseudoRandomGeneratorSeed(*(this->gen), 1234ULL); } MonteCarloIntegrator::~MonteCarloIntegrator() { curandDestroyGenerator(*(this->gen)); } //! // Compute DUST for 2 time series. // float MonteCarloIntegrator::distance(const TimeSeries &ts1, const TimeSeries &ts2, int ts_length) { size_t tuples_size = sizeof(float) * ts_length * TUPLE_SIZE; size_t dusts_size = sizeof(float) * ts_length; // copy ts1, ts2 to memory float *tuples, *dusts, *tuples_GPU, *dusts_GPU; tuples = (float*)malloc(tuples_size); dusts = (float*)malloc(dusts_size); checkCudaErrors(cudaMalloc((void**)&tuples_GPU, tuples_size)); checkCudaErrors(cudaMalloc((void**)&dusts_GPU, dusts_size)); int idx = 0; for (int i = 0; i < ts_length; i++) { RandomVariable x = ts1.at(i); RandomVariable y = ts2.at(i); tuples[idx] = static_cast<float>(x.distribution); tuples[idx + 1] = x.observation; tuples[idx + 2] = x.stddev; tuples[idx + 3] = static_cast<float>(y.distribution); tuples[idx + 4] = y.observation; tuples[idx + 5] = y.stddev; idx += TUPLE_SIZE; } checkCudaErrors(cudaMemcpy(tuples_GPU, tuples, tuples_size, cudaMemcpyHostToDevice)); // generate uniform random number on samples_GPU float *samples_GPU; checkCudaErrors(cudaMalloc((void**)&samples_GPU, sizeof(float) * INTEGRATION_SAMPLES * ts_length * 3)); curandGenerateUniform(*(this->gen), samples_GPU, INTEGRATION_SAMPLES * ts_length * 3); // call kernel g_distance_kernel<<< ts_length, TPB >>>(tuples_GPU, samples_GPU, dusts_GPU); checkCudaErrors(cudaMemcpy(dusts, dusts_GPU, dusts_size, cudaMemcpyDeviceToHost)); float dust_sum = 0; for (int i = 0; i < ts_length; i++) { dust_sum += dusts[i]; // std::cout << "### " << sqrt(dusts[i]) << std::endl; } //std::cout << "ts length: " << ts_length << std::endl; free(tuples); free(dusts); checkCudaErrors(cudaFree(tuples_GPU)); checkCudaErrors(cudaFree(dusts_GPU)); checkCudaErrors(cudaFree(samples_GPU)); return sqrt(dust_sum); } // Match 1 ts to all ts in tsc. // Repeat Integrator::distance for all combination. int MonteCarloIntegrator::match_naive(const TimeSeries &ts, const TimeSeriesCollection &tsc) { // Determine the length of time series. unsigned int ts_length = min(ts.length(), tsc.length_min()); for (int i = 0; i < tsc.sequences.size(); i++) { ts_length = min(ts_length, tsc.sequences[i].length()); } float DUST_min; float i_min; for (int i = 0; i < tsc.sequences.size(); i++) { float DUST = this->distance(ts, tsc.sequences[i], ts_length); if (DUST < DUST_min || i == 0) { DUST_min = DUST; i_min = i; } } std::cout << "matched : " << ts_length << std::endl; std::cout << "\t index: " << i_min << ", distance : " << DUST_min << std::endl; return i_min; } // Match 1 ts to all ts in tsc // Optimized version. int MonteCarloIntegrator::match(const TimeSeries &ts, const TimeSeriesCollection &tsc) { this->prepare_match(ts, tsc); // Generate uniform random number on samples_GPU. 
size_t samples_num = INTEGRATION_SAMPLES * ts_length * ts_num * 3; checkCudaErrors(cudaMalloc((void**)&(samples_D), sizeof(float) * samples_num)); curandGenerateUniform(*(this->gen), samples_D, samples_num); g_match<<< ts_length, TPB >>>(ts_D, tsc_D, dusts_D, ts_length, ts_num, this->samples_D); int i_min; float DUST_min; this->finish_match(&i_min, &DUST_min); checkCudaErrors(cudaFree(samples_D)); std::cout << "matched : " << ts_length << std::endl; std::cout << "\t index: " << i_min << ", distance: " << DUST_min << std::endl; return i_min; }
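// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file pair above): g_distance_kernel
// and g_match are presumably declared in kernel.hpp and are not reproduced in
// this dump.  The host code above only shows that INTEGRATION_SAMPLES * 3
// uniform samples are drawn per time step, that the kernel reduces them to one
// squared-dust value per step, and that the host returns the square root of
// their sum.  The generic Monte Carlo estimate such a kernel relies on is
// sketched below; f and the unit integration domain are placeholders, not the
// actual dust integrand.
static float monte_carlo_mean_sketch(const float *uniform_samples, int n,
                                     float (*f)(float))
{
    float acc = 0.0f;
    for (int i = 0; i < n; i++) {
        acc += f(uniform_samples[i]);     // evaluate the integrand at a uniform sample
    }
    return acc / (float)n;                // sample mean approximates the integral over [0, 1]
}
// ---------------------------------------------------------------------------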
6d0a2e4cd9658e40c5bfb370f4025b0ad2471e9b.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> /* * a multi-GPU CUDA application implementing a vector sum. */ __global__ void iKernel(float *A, float *B, float *C, const int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; } void initialData(float * const ip, int const size) { for (int i = 0; i < size; i++) { ip[i] = (float)rand() / (float)RAND_MAX; } } void sumOnHost(float *A, float *B, float *C, const int N) { for (int idx = 0; idx < N; idx++) { C[idx] = A[idx] + B[idx]; } } void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { printf("Array do not match!\n"); printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i); break; } } } int main(int argc, char **argv) { int ngpus; printf("> starting %s", argv[0]); // get device count CHECK(hipGetDeviceCount(&ngpus)); printf("CUDA-capable devices: %i\n", ngpus); int ishift = 24; if (argc > 2) ishift = atoi(argv[2]); // memory size int size = 1 << ishift; if (argc > 1) { if (atoi(argv[1]) > ngpus) { fprintf(stderr, "Invalid number of GPUs specified: %d is greater than" "the total number of GPUs in this platform(%d)\n",atoi(argv[1]),ngpus); exit(1); } ngpus = atoi(argv[1]); } // memory size of each device int iSize = size / ngpus; size_t iBytes = iSize * sizeof(float); printf("> total array size %d M, using %d devices with each device" "handling %d M\n", size / 1024 / 1024, ngpus, iSize / 1024 / 1024); // allocate device memory float **d_A = (float **)malloc(sizeof(float *) * ngpus); float **d_B = (float **)malloc(sizeof(float *) * ngpus); float **d_C = (float **)malloc(sizeof(float *) * ngpus); float **h_A = (float **)malloc(sizeof(float *) * ngpus); float **h_B = (float **)malloc(sizeof(float *) * ngpus); float **hostRef = (float **)malloc(sizeof(float *) * ngpus); float **gpuRef = (float **)malloc(sizeof(float *) * ngpus); hipStream_t *stream = (hipStream_t *)malloc(sizeof(hipStream_t) * ngpus); for (int i = 0; i < ngpus; i++) { // set current device CHECK(hipSetDevice(i)); // allocate device memory CHECK(hipMalloc((void **)&d_A[i], iBytes)); CHECK(hipMalloc((void **)&d_B[i], iBytes)); CHECK(hipMalloc((void **)&d_C[i], iBytes)); // allocate page locked host memory for asynchronous data transfer CHECK(hipHostMalloc((void **)&h_A[i], iBytes)); CHECK(hipHostMalloc((void **)&h_B[i], iBytes)); CHECK(hipHostMalloc((void **)&hostRef[i], iBytes)); CHECK(hipHostMalloc((void **)&gpuRef[i], iBytes)); // create streams for timing and synchronizing CHECK(hipStreamCreate(&stream[i])); } dim3 block (512); dim3 grid ((iSize + block.x - 1) / block.x); for (int i = 0; i < ngpus; i++) { CHECK(hipSetDevice(i)); initialData(h_A[i], iSize); initialData(h_B[i], iSize); } // record start time double iStart = seconds(); // distributing the workload across multiple devices for (int i = 0; i < ngpus; i++) { CHECK(hipSetDevice(i)); CHECK(hipMemcpyAsync(d_A[i], h_A[i], iBytes, hipMemcpyHostToDevice, stream[i])); CHECK(hipMemcpyAsync(d_B[i], h_B[i], iBytes, hipMemcpyHostToDevice, stream[i])); hipLaunchKernelGGL(( iKernel), dim3(grid), dim3(block), 0, stream[i], d_A[i], d_B[i], d_C[i], iSize); CHECK(hipMemcpyAsync(gpuRef[i], d_C[i], iBytes, hipMemcpyDeviceToHost, stream[i])); } // synchronize streams for (int i = 0; i < ngpus; i++) { CHECK(hipSetDevice(i)); CHECK(hipStreamSynchronize(stream[i])); } // 
calculate the elapsed time in seconds double iElaps = seconds() - iStart; printf("%d GPU timer elapsed: %8.2fms \n", ngpus, iElaps * 1000.0); // check results for (int i = 0; i < ngpus; i++) { // set device CHECK(hipSetDevice(i)); sumOnHost(h_A[i], h_B[i], hostRef[i], iSize); checkResult(hostRef[i], gpuRef[i], iSize); } // free memory for (int i = 0; i < ngpus; i++) { CHECK(hipSetDevice(i)); CHECK(hipFree(d_A[i])); CHECK(hipFree(d_B[i])); CHECK(hipFree(d_C[i])); CHECK(hipHostFree(h_A[i])); CHECK(hipHostFree(h_B[i])); CHECK(hipHostFree(hostRef[i])); CHECK(hipHostFree(gpuRef[i])); CHECK(hipStreamDestroy(stream[i])); CHECK(hipDeviceReset()); } free(d_A); free(d_B); free(d_C); free(h_A); free(h_B); free(hostRef); free(gpuRef); free(stream); return EXIT_SUCCESS; }
6d0a2e4cd9658e40c5bfb370f4025b0ad2471e9b.cu
#include "../common/common.h" #include <stdio.h> #include <assert.h> #include <cuda_runtime.h> /* * a multi-GPU CUDA application implementing a vector sum. */ __global__ void iKernel(float *A, float *B, float *C, const int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; } void initialData(float * const ip, int const size) { for (int i = 0; i < size; i++) { ip[i] = (float)rand() / (float)RAND_MAX; } } void sumOnHost(float *A, float *B, float *C, const int N) { for (int idx = 0; idx < N; idx++) { C[idx] = A[idx] + B[idx]; } } void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { printf("Array do not match!\n"); printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i); break; } } } int main(int argc, char **argv) { int ngpus; printf("> starting %s", argv[0]); // get device count CHECK(cudaGetDeviceCount(&ngpus)); printf("CUDA-capable devices: %i\n", ngpus); int ishift = 24; if (argc > 2) ishift = atoi(argv[2]); // memory size int size = 1 << ishift; if (argc > 1) { if (atoi(argv[1]) > ngpus) { fprintf(stderr, "Invalid number of GPUs specified: %d is greater than" "the total number of GPUs in this platform(%d)\n",atoi(argv[1]),ngpus); exit(1); } ngpus = atoi(argv[1]); } // memory size of each device int iSize = size / ngpus; size_t iBytes = iSize * sizeof(float); printf("> total array size %d M, using %d devices with each device" "handling %d M\n", size / 1024 / 1024, ngpus, iSize / 1024 / 1024); // allocate device memory float **d_A = (float **)malloc(sizeof(float *) * ngpus); float **d_B = (float **)malloc(sizeof(float *) * ngpus); float **d_C = (float **)malloc(sizeof(float *) * ngpus); float **h_A = (float **)malloc(sizeof(float *) * ngpus); float **h_B = (float **)malloc(sizeof(float *) * ngpus); float **hostRef = (float **)malloc(sizeof(float *) * ngpus); float **gpuRef = (float **)malloc(sizeof(float *) * ngpus); cudaStream_t *stream = (cudaStream_t *)malloc(sizeof(cudaStream_t) * ngpus); for (int i = 0; i < ngpus; i++) { // set current device CHECK(cudaSetDevice(i)); // allocate device memory CHECK(cudaMalloc((void **)&d_A[i], iBytes)); CHECK(cudaMalloc((void **)&d_B[i], iBytes)); CHECK(cudaMalloc((void **)&d_C[i], iBytes)); // allocate page locked host memory for asynchronous data transfer CHECK(cudaMallocHost((void **)&h_A[i], iBytes)); CHECK(cudaMallocHost((void **)&h_B[i], iBytes)); CHECK(cudaMallocHost((void **)&hostRef[i], iBytes)); CHECK(cudaMallocHost((void **)&gpuRef[i], iBytes)); // create streams for timing and synchronizing CHECK(cudaStreamCreate(&stream[i])); } dim3 block (512); dim3 grid ((iSize + block.x - 1) / block.x); for (int i = 0; i < ngpus; i++) { CHECK(cudaSetDevice(i)); initialData(h_A[i], iSize); initialData(h_B[i], iSize); } // record start time double iStart = seconds(); // distributing the workload across multiple devices for (int i = 0; i < ngpus; i++) { CHECK(cudaSetDevice(i)); CHECK(cudaMemcpyAsync(d_A[i], h_A[i], iBytes, cudaMemcpyHostToDevice, stream[i])); CHECK(cudaMemcpyAsync(d_B[i], h_B[i], iBytes, cudaMemcpyHostToDevice, stream[i])); iKernel<<<grid, block, 0, stream[i]>>>(d_A[i], d_B[i], d_C[i], iSize); CHECK(cudaMemcpyAsync(gpuRef[i], d_C[i], iBytes, cudaMemcpyDeviceToHost, stream[i])); } // synchronize streams for (int i = 0; i < ngpus; i++) { CHECK(cudaSetDevice(i)); CHECK(cudaStreamSynchronize(stream[i])); } // calculate the elapsed time in seconds double iElaps = seconds() - iStart; 
printf("%d GPU timer elapsed: %8.2fms \n", ngpus, iElaps * 1000.0); // check results for (int i = 0; i < ngpus; i++) { // set device CHECK(cudaSetDevice(i)); sumOnHost(h_A[i], h_B[i], hostRef[i], iSize); checkResult(hostRef[i], gpuRef[i], iSize); } // free memory for (int i = 0; i < ngpus; i++) { CHECK(cudaSetDevice(i)); CHECK(cudaFree(d_A[i])); CHECK(cudaFree(d_B[i])); CHECK(cudaFree(d_C[i])); CHECK(cudaFreeHost(h_A[i])); CHECK(cudaFreeHost(h_B[i])); CHECK(cudaFreeHost(hostRef[i])); CHECK(cudaFreeHost(gpuRef[i])); CHECK(cudaStreamDestroy(stream[i])); CHECK(cudaDeviceReset()); } free(d_A); free(d_B); free(d_C); free(h_A); free(h_B); free(hostRef); free(gpuRef); free(stream); return EXIT_SUCCESS; }
d6d2620f682bc916c591672aff86b3a3980892f7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <thrust/device_vector.h> #include <thrust/extrema.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), 
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // 
NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } //----------- MY max ----------------- template <> 
void caffe_gpu_max<float>(const int n, const float* x, int* y) { CUBLAS_CHECK(hipblasIsamax(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_max<double>(const int n, const double* x, int* y) { CUBLAS_CHECK(hipblasIdamax(Caffe::cublas_handle(), n, x, 1, y)); } //--------- end my max----------------------------- //----------- MY min ----------------- template <> void caffe_gpu_min<float>(const int n, const float* x, int* y) { CUBLAS_CHECK(hipblasIsamin(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_min<double>(const int n, const double* x, int* y) { CUBLAS_CHECK(hipblasIdamin(Caffe::cublas_handle(), n, x, 1, y)); } //--------- end my min----------------------------- // ------------ My add -------------------------- template <typename Dtype> __global__ void my_add_kernel(const int n, const Dtype* a, Dtype b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b; } } template <> void my_caffe_gpu_add<float>(const int N, const float* a, float b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( my_add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void my_caffe_gpu_add<double>(const int N, const double* a, double b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( my_add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } // --------------- END div ------------------------------ // ------------ My div -------------------------- template <typename Dtype> __global__ void my_div_kernel(const int n, const Dtype* a, Dtype b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b; } } template <> void my_caffe_gpu_div<float>(const int N, const float* a, float b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( my_div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void my_caffe_gpu_div<double>(const int N, const double* a, double b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( my_div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } // --------------- END div ------------------------------ // ------------ My Reg -------------------------- template <typename Dtype> __global__ void my_reg_kernel_pos(const int n, const Dtype* a, Dtype max, Dtype min, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = ((a[index] - min) / (max - min)); } } template <typename Dtype> __global__ void my_reg_kernel_neg(const int n, const Dtype* a, Dtype max, Dtype min, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = ((a[index] - min) / (max - min))*(0 - (-1)) + (-1); } } template <typename Dtype> __global__ void my_reg_kernel_all(const int n, const Dtype* a, Dtype max, Dtype min, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = ((a[index] - min) / (max - min))*(1 - (-1)) + (-1); } } template <> void my_caffe_gpu_reg<float>(const int N, const float* a, float b, float c, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) if (c>=0 && b >=0) { hipLaunchKernelGGL(( my_reg_kernel_pos<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, c, y); } else if (c<0 && b>=0){ hipLaunchKernelGGL(( my_reg_kernel_all<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, c, y); } else if (c<0 && b<0){ hipLaunchKernelGGL(( my_reg_kernel_neg<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, 
a, b, c, y); } } template <> void my_caffe_gpu_reg<double>(const int N, const double* a, double b, double c, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) if (c>=0 && b >=0) { hipLaunchKernelGGL(( my_reg_kernel_pos<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, c, y); } else if (c<0 && b>=0){ hipLaunchKernelGGL(( my_reg_kernel_all<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, c, y); } else if (c<0 && b<0){ hipLaunchKernelGGL(( my_reg_kernel_neg<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, c, y); } // Old one /* if (c>=0) { hipLaunchKernelGGL(( my_reg_kernel_pos<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, c, y); } else{ hipLaunchKernelGGL(( my_reg_kernel_all<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, c, y); } */ } // --------------- END Reg ------------------------------ // ------------ My New Min -------------------------- template <typename Dtype> __global__ void my_new_min_kernel(const int n, const Dtype* a, Dtype *max, Dtype *min, Dtype* y) { Dtype min_=10000; Dtype max_=-1000; Dtype sum=0.0; for (int i=0;i<n;i++) { if (a[i]<min_){ // min_ = a[i]; } if (a[i]>max_){ // max_=a[i]; } sum=sum+a[i]; } *max=max_; *min=min_; } template <> void my_caffe_gpu_new_min<float>(const int N, const float* a, float *b, float *c, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) // float aa= *a; thrust::device_ptr<float> min_ptr = thrust::min_element(thrust::device_pointer_cast(y), thrust::device_pointer_cast(y) + N); thrust::device_ptr<float> max_ptr = thrust::max_element(thrust::device_pointer_cast(y), thrust::device_pointer_cast(y) + N); *c=min_ptr[0]; *b = max_ptr[0]; // *b=2; // my_new_min_kernel<float><<<CAFFE_GET_BLOCKS(N), 0>>>( N, a, b, c, y); } template <> void my_caffe_gpu_new_min<double>(const int N, const double* a, double *b, double *c, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) // double aa=*a; thrust::device_ptr<double> min_ptr = thrust::min_element(thrust::device_pointer_cast(y), thrust::device_pointer_cast(y) + N); thrust::device_ptr<double> max_ptr = thrust::max_element(thrust::device_pointer_cast(y), thrust::device_pointer_cast(y) + N); *c=min_ptr[0]; *b = max_ptr[0]; // *b=2; // my_new_min_kernel<double><<<CAFFE_GET_BLOCKS(N), 0>>>( N, a, b, c, y); } // --------------- END New Min ------------------------------ } // namespace caffe
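The "cublas follows fortran order" note and the swapped B/A, TransB/TransA arguments in caffe_gpu_gemm above are easiest to read through the transpose identity below: a row-major M x K buffer A is, byte for byte, the column-major K x M matrix A-transpose, so asking hipBLAS to compute C^T = B^T A^T with dimensions (N, M, K) and leading dimensions ldb, lda, N leaves exactly the row-major product C = A B in memory, which is what the hipblasSgemm/hipblasDgemm calls do.

C = A B \quad\Longleftrightarrow\quad C^{\top} = B^{\top} A^{\top},
\qquad A \in \mathbb{R}^{M \times K},\; B \in \mathbb{R}^{K \times N},\; C \in \mathbb{R}^{M \times N}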
d6d2620f682bc916c591672aff86b3a3980892f7.cu
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <thrust/device_vector.h> #include <thrust/extrema.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <> void 
caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<float><<<CAFFE_GET_BLOCKS(N), 
CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } //----------- MY max ----------------- template <> void caffe_gpu_max<float>(const int n, const float* x, int* y) { CUBLAS_CHECK(cublasIsamax(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_max<double>(const int n, const double* x, int* y) { CUBLAS_CHECK(cublasIdamax(Caffe::cublas_handle(), n, x, 1, y)); } //--------- end my max----------------------------- //----------- MY min ----------------- template <> void caffe_gpu_min<float>(const int n, const float* x, int* y) { CUBLAS_CHECK(cublasIsamin(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_min<double>(const int n, const double* x, int* y) { CUBLAS_CHECK(cublasIdamin(Caffe::cublas_handle(), n, x, 1, y)); } //--------- end my 
min----------------------------- // ------------ My add -------------------------- template <typename Dtype> __global__ void my_add_kernel(const int n, const Dtype* a, Dtype b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b; } } template <> void my_caffe_gpu_add<float>(const int N, const float* a, float b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) my_add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void my_caffe_gpu_add<double>(const int N, const double* a, double b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) my_add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } // --------------- END div ------------------------------ // ------------ My div -------------------------- template <typename Dtype> __global__ void my_div_kernel(const int n, const Dtype* a, Dtype b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b; } } template <> void my_caffe_gpu_div<float>(const int N, const float* a, float b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) my_div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void my_caffe_gpu_div<double>(const int N, const double* a, double b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) my_div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } // --------------- END div ------------------------------ // ------------ My Reg -------------------------- template <typename Dtype> __global__ void my_reg_kernel_pos(const int n, const Dtype* a, Dtype max, Dtype min, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = ((a[index] - min) / (max - min)); } } template <typename Dtype> __global__ void my_reg_kernel_neg(const int n, const Dtype* a, Dtype max, Dtype min, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = ((a[index] - min) / (max - min))*(0 - (-1)) + (-1); } } template <typename Dtype> __global__ void my_reg_kernel_all(const int n, const Dtype* a, Dtype max, Dtype min, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = ((a[index] - min) / (max - min))*(1 - (-1)) + (-1); } } template <> void my_caffe_gpu_reg<float>(const int N, const float* a, float b, float c, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) if (c>=0 && b >=0) { my_reg_kernel_pos<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, a, b, c, y); } else if (c<0 && b>=0){ my_reg_kernel_all<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, a, b, c, y); } else if (c<0 && b<0){ my_reg_kernel_neg<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, a, b, c, y); } } template <> void my_caffe_gpu_reg<double>(const int N, const double* a, double b, double c, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) if (c>=0 && b >=0) { my_reg_kernel_pos<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, a, b, c, y); } else if (c<0 && b>=0){ my_reg_kernel_all<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, a, b, c, y); } else if (c<0 && b<0){ my_reg_kernel_neg<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, a, b, c, y); } // Old one /* if (c>=0) { my_reg_kernel_pos<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, a, b, c, y); } else{ my_reg_kernel_all<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, a, b, c, y); } */ } // --------------- END Reg ------------------------------ // ------------ My New Min -------------------------- template <typename Dtype> __global__ void my_new_min_kernel(const int n, const Dtype* a, 
Dtype *max, Dtype *min, Dtype* y) { Dtype min_=10000; Dtype max_=-1000; Dtype sum=0.0; for (int i=0;i<n;i++) { if (a[i]<min_){ // min_ = a[i]; } if (a[i]>max_){ // max_=a[i]; } sum=sum+a[i]; } *max=max_; *min=min_; } template <> void my_caffe_gpu_new_min<float>(const int N, const float* a, float *b, float *c, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) // float aa= *a; thrust::device_ptr<float> min_ptr = thrust::min_element(thrust::device_pointer_cast(y), thrust::device_pointer_cast(y) + N); thrust::device_ptr<float> max_ptr = thrust::max_element(thrust::device_pointer_cast(y), thrust::device_pointer_cast(y) + N); *c=min_ptr[0]; *b = max_ptr[0]; // *b=2; // my_new_min_kernel<float><<<CAFFE_GET_BLOCKS(N), 0>>>( N, a, b, c, y); } template <> void my_caffe_gpu_new_min<double>(const int N, const double* a, double *b, double *c, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) // double aa=*a; thrust::device_ptr<double> min_ptr = thrust::min_element(thrust::device_pointer_cast(y), thrust::device_pointer_cast(y) + N); thrust::device_ptr<double> max_ptr = thrust::max_element(thrust::device_pointer_cast(y), thrust::device_pointer_cast(y) + N); *c=min_ptr[0]; *b = max_ptr[0]; // *b=2; // my_new_min_kernel<double><<<CAFFE_GET_BLOCKS(N), 0>>>( N, a, b, c, y); } // --------------- END New Min ------------------------------ } // namespace caffe
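Every kernel in this file is launched through CAFFE_GET_BLOCKS / CAFFE_CUDA_NUM_THREADS and iterates with CUDA_KERNEL_LOOP, none of which are defined here (in upstream Caffe they live in device_alternate.hpp). The sketch below is reconstructed from memory and should be treated as approximate; the point is that each kernel is a grid-stride loop sized to cover n elements.

// Approximate reconstruction of Caffe's launch helpers (assumed, not copied
// from the upstream header): a fixed block size, enough blocks to cover N,
// and a grid-stride loop inside each kernel.
const int CAFFE_CUDA_NUM_THREADS = 512;

inline int CAFFE_GET_BLOCKS(const int N)
{
    return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}

// Each thread starts at its global index and strides by the total number of
// threads in the grid until the whole range [0, n) is covered.
#define CUDA_KERNEL_LOOP(i, n)                                   \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;          \
         i < (n);                                                \
         i += blockDim.x * gridDim.x)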
3730af3cf69aa08979b9bf6b3009886ec0baefbc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda-common.hxx"

template<typename TT>
__global__ void mm_kernel(const TT * AA, const TT * BB, TT * CC, size_t nn)
{
    unsigned int ii = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int jj = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int index = ii*nn + jj;

    if(ii < nn and jj < nn) {
        TT answer = 0;
        for(int kk = 0; kk < nn; ++kk)
            answer += AA[ii*nn + kk] * BB[kk*nn + jj];
        CC[index] = answer;
    }
}

template<typename TT>
void matrix_multiply(const TT * AA, const TT * BB, TT * CC, size_t nn)
{
    TT *da, *db, *dc;
    CUDA_CALL(hipMalloc((void **) &da, sizeof(TT)*nn*nn));
    CUDA_CALL(hipMalloc((void **) &db, sizeof(TT)*nn*nn));
    CUDA_CALL(hipMalloc((void **) &dc, sizeof(TT)*nn*nn));

    CUDA_CALL(hipMemcpy(da, AA, sizeof(TT)*nn*nn, hipMemcpyHostToDevice));
    CUDA_CALL(hipMemcpy(db, BB, sizeof(TT)*nn*nn, hipMemcpyHostToDevice));

    dim3 dimGrid(ceil(nn/32.0), ceil(nn/32.0));
    dim3 dimBlock(32,32);
    hipLaunchKernelGGL(( mm_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, da, db, dc, nn);
    CUDA_CALL(hipPeekAtLastError());
    CUDA_CALL(hipDeviceSynchronize());

    CUDA_CALL(hipMemcpy(CC, dc, sizeof(TT)*nn*nn, hipMemcpyDeviceToHost));

    CUDA_CALL(hipFree(da));
    CUDA_CALL(hipFree(db));
    CUDA_CALL(hipFree(dc));
}

template void matrix_multiply<float>(const float*, const float*, float*, size_t);
template void matrix_multiply<double>(const double*, const double*, double*, size_t);
3730af3cf69aa08979b9bf6b3009886ec0baefbc.cu
#include "cuda-common.hxx" template<typename TT> __global__ void mm_kernel(const TT * AA, const TT * BB, TT * CC, size_t nn) { unsigned int ii = blockIdx.x * blockDim.x + threadIdx.x; unsigned int jj = blockIdx.y * blockDim.y + threadIdx.y; unsigned int index = ii*nn + jj; if(ii < nn and jj < nn) { TT answer = 0; for(int kk = 0; kk < nn; ++kk) answer += AA[ii*nn + kk] * BB[kk*nn + jj]; CC[index] = answer; } } template<typename TT> void matrix_multiply(const TT * AA, const TT * BB, TT * CC, size_t nn) { TT *da, *db, *dc; CUDA_CALL(cudaMalloc((void **) &da, sizeof(TT)*nn*nn)); CUDA_CALL(cudaMalloc((void **) &db, sizeof(TT)*nn*nn)); CUDA_CALL(cudaMalloc((void **) &dc, sizeof(TT)*nn*nn)); CUDA_CALL(cudaMemcpy(da, AA, sizeof(TT)*nn*nn, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemcpy(db, BB, sizeof(TT)*nn*nn, cudaMemcpyHostToDevice)); dim3 dimGrid(ceil(nn/32.0), ceil(nn/32.0)); dim3 dimBlock(32,32); mm_kernel<<< dimGrid, dimBlock >>>(da, db, dc, nn); CUDA_CALL(cudaPeekAtLastError()); CUDA_CALL(cudaDeviceSynchronize()); CUDA_CALL(cudaMemcpy(CC, dc, sizeof(TT)*nn*nn, cudaMemcpyDeviceToHost)); CUDA_CALL(cudaFree(da)); CUDA_CALL(cudaFree(db)); CUDA_CALL(cudaFree(dc)); } template void matrix_multiply<float>(const float*, const float*, float*, size_t); template void matrix_multiply<double>(const double*, const double*, double*, size_t);
1d53168402d9e58515956484074ed309b2d5f6d9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;

    if (ix < nx)
        for (int iy = 0; iy < ny; iy++)
        {
            int idx = iy * nx + ix;
            MatC[idx] = MatA[idx] + MatB[idx];
        }
}
1d53168402d9e58515956484074ed309b2d5f6d9.cu
#include "includes.h" __global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx, int ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; if (ix < nx ) for (int iy = 0; iy < ny; iy++) { int idx = iy * nx + ix; MatC[idx] = MatA[idx] + MatB[idx]; } }
b5fed37530d111a1e2a566d33023eb55c83d2644.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/GpuIndex.h> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/GpuResources.h> #include <faiss/gpu/impl/Metrics.cuh> #include <faiss/gpu/utils/CopyUtils.cuh> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/StaticUtils.h> #include <limits> #include <memory> namespace faiss { namespace gpu { /// Default CPU search size for which we use paged copies constexpr size_t kMinPageSize = (size_t) 256 * 1024 * 1024; /// Size above which we page copies from the CPU to GPU (non-paged /// memory usage) constexpr size_t kNonPinnedPageSize = (size_t) 256 * 1024 * 1024; // Default size for which we page add or search constexpr size_t kAddPageSize = (size_t) 256 * 1024 * 1024; // Or, maximum number of vectors to consider per page of add or search constexpr size_t kAddVecSize = (size_t) 512 * 1024; // Use a smaller search size, as precomputed code usage on IVFPQ // requires substantial amounts of memory // FIXME: parameterize based on algorithm need constexpr size_t kSearchVecSize = (size_t) 32 * 1024; GpuIndex::GpuIndex(GpuResources* resources, int dims, faiss::MetricType metric, GpuIndexConfig config) : Index(dims, metric), resources_(resources), device_(config.device), memorySpace_(config.memorySpace), minPagedSize_(kMinPageSize) { FAISS_THROW_IF_NOT_FMT(device_ < getNumDevices(), "Invalid GPU device %d", device_); FAISS_THROW_IF_NOT_MSG(dims > 0, "Invalid number of dimensions"); #ifdef FAISS_UNIFIED_MEM FAISS_THROW_IF_NOT_FMT( memorySpace_ == MemorySpace::Device || (memorySpace_ == MemorySpace::Unified && getFullUnifiedMemSupport(device_)), "Device %d does not support full CUDA 8 Unified Memory (CC 6.0+)", config.device); #else FAISS_THROW_IF_NOT_MSG(memorySpace_ == MemorySpace::Device, "Must compile with CUDA 8+ for Unified Memory support"); #endif FAISS_THROW_IF_NOT_MSG(isMetricSupported(metric), "Unsupported metric type on GPU"); FAISS_ASSERT(resources_); resources_->initializeForDevice(device_); } void GpuIndex::setMinPagingSize(size_t size) { minPagedSize_ = size; } size_t GpuIndex::getMinPagingSize() const { return minPagedSize_; } void GpuIndex::add(Index::idx_t n, const float* x) { // Pass to add_with_ids add_with_ids(n, x, nullptr); } void GpuIndex::add_with_ids(Index::idx_t n, const float* x, const Index::idx_t* ids) { FAISS_THROW_IF_NOT_MSG(this->is_trained, "Index not trained"); // For now, only support <= max int results FAISS_THROW_IF_NOT_FMT(n <= (Index::idx_t) std::numeric_limits<int>::max(), "GPU index only supports up to %d indices", std::numeric_limits<int>::max()); if (n == 0) { // nothing to add return; } std::vector<Index::idx_t> generatedIds; // Generate IDs if we need them if (!ids && addImplRequiresIDs_()) { generatedIds = std::vector<Index::idx_t>(n); for (Index::idx_t i = 0; i < n; ++i) { generatedIds[i] = this->ntotal + i; } } DeviceScope scope(device_); addPaged_((int) n, x, ids ? ids : generatedIds.data()); } void GpuIndex::addPaged_(int n, const float* x, const Index::idx_t* ids) { if (n > 0) { size_t totalSize = (size_t) n * this->d * sizeof(float); if (totalSize > kAddPageSize || n > kAddVecSize) { // How many vectors fit into kAddPageSize? 
size_t maxNumVecsForPageSize = kAddPageSize / ((size_t) this->d * sizeof(float)); // Always add at least 1 vector, if we have huge vectors maxNumVecsForPageSize = ::max(maxNumVecsForPageSize, (size_t) 1); size_t tileSize = ::min((size_t) n, maxNumVecsForPageSize); tileSize = ::min(tileSize, kSearchVecSize); for (size_t i = 0; i < (size_t) n; i += tileSize) { size_t curNum = ::min(tileSize, n - i); addPage_(curNum, x + i * (size_t) this->d, ids ? ids + i : nullptr); } } else { addPage_(n, x, ids); } } } void GpuIndex::addPage_(int n, const float* x, const Index::idx_t* ids) { // At this point, `x` can be resident on CPU or GPU, and `ids` may be resident // on CPU, GPU or may be null. // // Before continuing, we guarantee that all data will be resident on the GPU. auto stream = resources_->getDefaultStreamCurrentDevice(); auto vecs = toDevice<float, 2>(resources_, device_, const_cast<float*>(x), stream, {n, this->d}); if (ids) { auto indices = toDevice<Index::idx_t, 1>(resources_, device_, const_cast<Index::idx_t*>(ids), stream, {n}); addImpl_(n, vecs.data(), ids ? indices.data() : nullptr); } else { addImpl_(n, vecs.data(), nullptr); } } void GpuIndex::search(Index::idx_t n, const float* x, Index::idx_t k, float* distances, Index::idx_t* labels, ConcurrentBitsetPtr bitset) const { FAISS_THROW_IF_NOT_MSG(this->is_trained, "Index not trained"); // For now, only support <= max int results FAISS_THROW_IF_NOT_FMT(n <= (Index::idx_t) std::numeric_limits<int>::max(), "GPU index only supports up to %d indices", std::numeric_limits<int>::max()); // Maximum k-selection supported is based on the CUDA SDK FAISS_THROW_IF_NOT_FMT(k <= (Index::idx_t) getMaxKSelection(), "GPU index only supports k <= %d (requested %d)", getMaxKSelection(), (int) k); // select limitation if (n == 0 || k == 0) { // nothing to search return; } DeviceScope scope(device_); auto stream = resources_->getDefaultStream(device_); // We guarantee that the searchImpl_ will be called with device-resident // pointers. // The input vectors may be too large for the GPU, but we still // assume that the output distances and labels are not. // Go ahead and make space for output distances and labels on the // GPU. // If we reach a point where all inputs are too big, we can add // another level of tiling. auto outDistances = toDevice<float, 2>(resources_, device_, distances, stream, {(int) n, (int) k}); auto outLabels = toDevice<faiss::Index::idx_t, 2>(resources_, device_, labels, stream, {(int) n, (int) k}); bool usePaged = false; if (getDeviceForAddress(x) == -1) { // It is possible that the user is querying for a vector set size // `x` that won't fit on the GPU. // In this case, we will have to handle paging of the data from CPU // -> GPU. // Currently, we don't handle the case where the output data won't // fit on the GPU (e.g., n * k is too large for the GPU memory). 
size_t dataSize = (size_t) n * this->d * sizeof(float); if (dataSize >= minPagedSize_) { searchFromCpuPaged_(n, x, k, outDistances.data(), outLabels.data(), bitset); usePaged = true; } } if (!usePaged) { searchNonPaged_(n, x, k, outDistances.data(), outLabels.data(), bitset); } // Copy back if necessary fromDevice<float, 2>(outDistances, distances, stream); fromDevice<faiss::Index::idx_t, 2>(outLabels, labels, stream); } void GpuIndex::searchNonPaged_(int n, const float* x, int k, float* outDistancesData, Index::idx_t* outIndicesData, ConcurrentBitsetPtr bitset) const { auto stream = resources_->getDefaultStream(device_); // Make sure arguments are on the device we desire; use temporary // memory allocations to move it if necessary auto vecs = toDevice<float, 2>(resources_, device_, const_cast<float*>(x), stream, {n, (int) this->d}); searchImpl_(n, vecs.data(), k, outDistancesData, outIndicesData, bitset); } void GpuIndex::searchFromCpuPaged_(int n, const float* x, int k, float* outDistancesData, Index::idx_t* outIndicesData, ConcurrentBitsetPtr bitset) const { Tensor<float, 2, true> outDistances(outDistancesData, {n, k}); Tensor<Index::idx_t, 2, true> outIndices(outIndicesData, {n, k}); // Is pinned memory available? auto pinnedAlloc = resources_->getPinnedMemory(); int pageSizeInVecs = (int) ((pinnedAlloc.second / 2) / (sizeof(float) * this->d)); if (!pinnedAlloc.first || pageSizeInVecs < 1) { // Just page without overlapping copy with compute int batchSize = utils::nextHighestPowerOf2( (int) ((size_t) kNonPinnedPageSize / (sizeof(float) * this->d))); for (int cur = 0; cur < n; cur += batchSize) { int num = ::min(batchSize, n - cur); auto outDistancesSlice = outDistances.narrowOutermost(cur, num); auto outIndicesSlice = outIndices.narrowOutermost(cur, num); searchNonPaged_(num, x + (size_t) cur * this->d, k, outDistancesSlice.data(), outIndicesSlice.data(), bitset); } return; } // // Pinned memory is available, so we can overlap copy with compute. // We use two pinned memory buffers, and triple-buffer the // procedure: // // 1 CPU copy -> pinned // 2 pinned copy -> GPU // 3 GPU compute // // 1 2 3 1 2 3 ... (pinned buf A) // 1 2 3 1 2 ... (pinned buf B) // 1 2 3 1 ... 
(pinned buf A) // time -> // auto defaultStream = resources_->getDefaultStream(device_); auto copyStream = resources_->getAsyncCopyStream(device_); FAISS_ASSERT((size_t) pageSizeInVecs * this->d <= (size_t) std::numeric_limits<int>::max()); float* bufPinnedA = (float*) pinnedAlloc.first; float* bufPinnedB = bufPinnedA + (size_t) pageSizeInVecs * this->d; float* bufPinned[2] = {bufPinnedA, bufPinnedB}; // Reserve space on the GPU for the destination of the pinned buffer // copy DeviceTensor<float, 2, true> bufGpuA( resources_->getMemoryManagerCurrentDevice(), {(int) pageSizeInVecs, (int) this->d}, defaultStream); DeviceTensor<float, 2, true> bufGpuB( resources_->getMemoryManagerCurrentDevice(), {(int) pageSizeInVecs, (int) this->d}, defaultStream); DeviceTensor<float, 2, true>* bufGpus[2] = {&bufGpuA, &bufGpuB}; // Copy completion events for the pinned buffers std::unique_ptr<CudaEvent> eventPinnedCopyDone[2]; // Execute completion events for the GPU buffers std::unique_ptr<CudaEvent> eventGpuExecuteDone[2]; // All offsets are in terms of number of vectors; they remain within // int bounds (as this function only handles max in vectors) // Current start offset for buffer 1 int cur1 = 0; int cur1BufIndex = 0; // Current start offset for buffer 2 int cur2 = -1; int cur2BufIndex = 0; // Current start offset for buffer 3 int cur3 = -1; int cur3BufIndex = 0; while (cur3 < n) { // Start async pinned -> GPU copy first (buf 2) if (cur2 != -1 && cur2 < n) { // Copy pinned to GPU int numToCopy = ::min(pageSizeInVecs, n - cur2); // Make sure any previous execution has completed before continuing auto& eventPrev = eventGpuExecuteDone[cur2BufIndex]; if (eventPrev.get()) { eventPrev->streamWaitOnEvent(copyStream); } CUDA_VERIFY(hipMemcpyAsync(bufGpus[cur2BufIndex]->data(), bufPinned[cur2BufIndex], (size_t) numToCopy * this->d * sizeof(float), hipMemcpyHostToDevice, copyStream)); // Mark a completion event in this stream eventPinnedCopyDone[cur2BufIndex] = std::move(std::unique_ptr<CudaEvent>(new CudaEvent(copyStream))); // We pick up from here cur3 = cur2; cur2 += numToCopy; cur2BufIndex = (cur2BufIndex == 0) ? 1 : 0; } if (cur3 != -1 && cur3 < n) { // Process on GPU int numToProcess = ::min(pageSizeInVecs, n - cur3); // Make sure the previous copy has completed before continuing auto& eventPrev = eventPinnedCopyDone[cur3BufIndex]; FAISS_ASSERT(eventPrev.get()); eventPrev->streamWaitOnEvent(defaultStream); // Create tensor wrappers // DeviceTensor<float, 2, true> input(bufGpus[cur3BufIndex]->data(), // {numToProcess, this->d}); auto outDistancesSlice = outDistances.narrowOutermost(cur3, numToProcess); auto outIndicesSlice = outIndices.narrowOutermost(cur3, numToProcess); searchImpl_(numToProcess, bufGpus[cur3BufIndex]->data(), k, outDistancesSlice.data(), outIndicesSlice.data(), bitset); // Create completion event eventGpuExecuteDone[cur3BufIndex] = std::move(std::unique_ptr<CudaEvent>(new CudaEvent(defaultStream))); // We pick up from here cur3BufIndex = (cur3BufIndex == 0) ? 1 : 0; cur3 += numToProcess; } if (cur1 < n) { // Copy CPU mem to CPU pinned int numToCopy = ::min(pageSizeInVecs, n - cur1); // Make sure any previous copy has completed before continuing auto& eventPrev = eventPinnedCopyDone[cur1BufIndex]; if (eventPrev.get()) { eventPrev->cpuWaitOnEvent(); } memcpy(bufPinned[cur1BufIndex], x + (size_t) cur1 * this->d, (size_t) numToCopy * this->d * sizeof(float)); // We pick up from here cur2 = cur1; cur1 += numToCopy; cur1BufIndex = (cur1BufIndex == 0) ? 
1 : 0; } } } void GpuIndex::compute_residual(const float* x, float* residual, Index::idx_t key) const { FAISS_THROW_MSG("compute_residual not implemented for this type of index"); } void GpuIndex::compute_residual_n(Index::idx_t n, const float* xs, float* residuals, const Index::idx_t* keys) const { FAISS_THROW_MSG("compute_residual_n not implemented for this type of index"); } } } // namespace
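To make the paging constants above concrete, here is the tiling arithmetic addPaged_ performs for a hypothetical index with d = 128 float dimensions (the dimension is an assumed example, not from the source):

maxNumVecsForPageSize = kAddPageSize / (d * sizeof(float))
                      = 268435456 / (128 * 4)            = 524288 vectors
tileSize              = min(n, 524288)
tileSize              = min(tileSize, kSearchVecSize = 32768)
                      => addPage_ receives pages of at most 32768 vectors,
                         i.e. 32768 * 512 B = 16 MiB per page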
b5fed37530d111a1e2a566d33023eb55c83d2644.cu
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/GpuIndex.h> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/GpuResources.h> #include <faiss/gpu/impl/Metrics.cuh> #include <faiss/gpu/utils/CopyUtils.cuh> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/StaticUtils.h> #include <limits> #include <memory> namespace faiss { namespace gpu { /// Default CPU search size for which we use paged copies constexpr size_t kMinPageSize = (size_t) 256 * 1024 * 1024; /// Size above which we page copies from the CPU to GPU (non-paged /// memory usage) constexpr size_t kNonPinnedPageSize = (size_t) 256 * 1024 * 1024; // Default size for which we page add or search constexpr size_t kAddPageSize = (size_t) 256 * 1024 * 1024; // Or, maximum number of vectors to consider per page of add or search constexpr size_t kAddVecSize = (size_t) 512 * 1024; // Use a smaller search size, as precomputed code usage on IVFPQ // requires substantial amounts of memory // FIXME: parameterize based on algorithm need constexpr size_t kSearchVecSize = (size_t) 32 * 1024; GpuIndex::GpuIndex(GpuResources* resources, int dims, faiss::MetricType metric, GpuIndexConfig config) : Index(dims, metric), resources_(resources), device_(config.device), memorySpace_(config.memorySpace), minPagedSize_(kMinPageSize) { FAISS_THROW_IF_NOT_FMT(device_ < getNumDevices(), "Invalid GPU device %d", device_); FAISS_THROW_IF_NOT_MSG(dims > 0, "Invalid number of dimensions"); #ifdef FAISS_UNIFIED_MEM FAISS_THROW_IF_NOT_FMT( memorySpace_ == MemorySpace::Device || (memorySpace_ == MemorySpace::Unified && getFullUnifiedMemSupport(device_)), "Device %d does not support full CUDA 8 Unified Memory (CC 6.0+)", config.device); #else FAISS_THROW_IF_NOT_MSG(memorySpace_ == MemorySpace::Device, "Must compile with CUDA 8+ for Unified Memory support"); #endif FAISS_THROW_IF_NOT_MSG(isMetricSupported(metric), "Unsupported metric type on GPU"); FAISS_ASSERT(resources_); resources_->initializeForDevice(device_); } void GpuIndex::setMinPagingSize(size_t size) { minPagedSize_ = size; } size_t GpuIndex::getMinPagingSize() const { return minPagedSize_; } void GpuIndex::add(Index::idx_t n, const float* x) { // Pass to add_with_ids add_with_ids(n, x, nullptr); } void GpuIndex::add_with_ids(Index::idx_t n, const float* x, const Index::idx_t* ids) { FAISS_THROW_IF_NOT_MSG(this->is_trained, "Index not trained"); // For now, only support <= max int results FAISS_THROW_IF_NOT_FMT(n <= (Index::idx_t) std::numeric_limits<int>::max(), "GPU index only supports up to %d indices", std::numeric_limits<int>::max()); if (n == 0) { // nothing to add return; } std::vector<Index::idx_t> generatedIds; // Generate IDs if we need them if (!ids && addImplRequiresIDs_()) { generatedIds = std::vector<Index::idx_t>(n); for (Index::idx_t i = 0; i < n; ++i) { generatedIds[i] = this->ntotal + i; } } DeviceScope scope(device_); addPaged_((int) n, x, ids ? ids : generatedIds.data()); } void GpuIndex::addPaged_(int n, const float* x, const Index::idx_t* ids) { if (n > 0) { size_t totalSize = (size_t) n * this->d * sizeof(float); if (totalSize > kAddPageSize || n > kAddVecSize) { // How many vectors fit into kAddPageSize? 
size_t maxNumVecsForPageSize = kAddPageSize / ((size_t) this->d * sizeof(float)); // Always add at least 1 vector, if we have huge vectors maxNumVecsForPageSize = std::max(maxNumVecsForPageSize, (size_t) 1); size_t tileSize = std::min((size_t) n, maxNumVecsForPageSize); tileSize = std::min(tileSize, kSearchVecSize); for (size_t i = 0; i < (size_t) n; i += tileSize) { size_t curNum = std::min(tileSize, n - i); addPage_(curNum, x + i * (size_t) this->d, ids ? ids + i : nullptr); } } else { addPage_(n, x, ids); } } } void GpuIndex::addPage_(int n, const float* x, const Index::idx_t* ids) { // At this point, `x` can be resident on CPU or GPU, and `ids` may be resident // on CPU, GPU or may be null. // // Before continuing, we guarantee that all data will be resident on the GPU. auto stream = resources_->getDefaultStreamCurrentDevice(); auto vecs = toDevice<float, 2>(resources_, device_, const_cast<float*>(x), stream, {n, this->d}); if (ids) { auto indices = toDevice<Index::idx_t, 1>(resources_, device_, const_cast<Index::idx_t*>(ids), stream, {n}); addImpl_(n, vecs.data(), ids ? indices.data() : nullptr); } else { addImpl_(n, vecs.data(), nullptr); } } void GpuIndex::search(Index::idx_t n, const float* x, Index::idx_t k, float* distances, Index::idx_t* labels, ConcurrentBitsetPtr bitset) const { FAISS_THROW_IF_NOT_MSG(this->is_trained, "Index not trained"); // For now, only support <= max int results FAISS_THROW_IF_NOT_FMT(n <= (Index::idx_t) std::numeric_limits<int>::max(), "GPU index only supports up to %d indices", std::numeric_limits<int>::max()); // Maximum k-selection supported is based on the CUDA SDK FAISS_THROW_IF_NOT_FMT(k <= (Index::idx_t) getMaxKSelection(), "GPU index only supports k <= %d (requested %d)", getMaxKSelection(), (int) k); // select limitation if (n == 0 || k == 0) { // nothing to search return; } DeviceScope scope(device_); auto stream = resources_->getDefaultStream(device_); // We guarantee that the searchImpl_ will be called with device-resident // pointers. // The input vectors may be too large for the GPU, but we still // assume that the output distances and labels are not. // Go ahead and make space for output distances and labels on the // GPU. // If we reach a point where all inputs are too big, we can add // another level of tiling. auto outDistances = toDevice<float, 2>(resources_, device_, distances, stream, {(int) n, (int) k}); auto outLabels = toDevice<faiss::Index::idx_t, 2>(resources_, device_, labels, stream, {(int) n, (int) k}); bool usePaged = false; if (getDeviceForAddress(x) == -1) { // It is possible that the user is querying for a vector set size // `x` that won't fit on the GPU. // In this case, we will have to handle paging of the data from CPU // -> GPU. // Currently, we don't handle the case where the output data won't // fit on the GPU (e.g., n * k is too large for the GPU memory). 
size_t dataSize = (size_t) n * this->d * sizeof(float); if (dataSize >= minPagedSize_) { searchFromCpuPaged_(n, x, k, outDistances.data(), outLabels.data(), bitset); usePaged = true; } } if (!usePaged) { searchNonPaged_(n, x, k, outDistances.data(), outLabels.data(), bitset); } // Copy back if necessary fromDevice<float, 2>(outDistances, distances, stream); fromDevice<faiss::Index::idx_t, 2>(outLabels, labels, stream); } void GpuIndex::searchNonPaged_(int n, const float* x, int k, float* outDistancesData, Index::idx_t* outIndicesData, ConcurrentBitsetPtr bitset) const { auto stream = resources_->getDefaultStream(device_); // Make sure arguments are on the device we desire; use temporary // memory allocations to move it if necessary auto vecs = toDevice<float, 2>(resources_, device_, const_cast<float*>(x), stream, {n, (int) this->d}); searchImpl_(n, vecs.data(), k, outDistancesData, outIndicesData, bitset); } void GpuIndex::searchFromCpuPaged_(int n, const float* x, int k, float* outDistancesData, Index::idx_t* outIndicesData, ConcurrentBitsetPtr bitset) const { Tensor<float, 2, true> outDistances(outDistancesData, {n, k}); Tensor<Index::idx_t, 2, true> outIndices(outIndicesData, {n, k}); // Is pinned memory available? auto pinnedAlloc = resources_->getPinnedMemory(); int pageSizeInVecs = (int) ((pinnedAlloc.second / 2) / (sizeof(float) * this->d)); if (!pinnedAlloc.first || pageSizeInVecs < 1) { // Just page without overlapping copy with compute int batchSize = utils::nextHighestPowerOf2( (int) ((size_t) kNonPinnedPageSize / (sizeof(float) * this->d))); for (int cur = 0; cur < n; cur += batchSize) { int num = std::min(batchSize, n - cur); auto outDistancesSlice = outDistances.narrowOutermost(cur, num); auto outIndicesSlice = outIndices.narrowOutermost(cur, num); searchNonPaged_(num, x + (size_t) cur * this->d, k, outDistancesSlice.data(), outIndicesSlice.data(), bitset); } return; } // // Pinned memory is available, so we can overlap copy with compute. // We use two pinned memory buffers, and triple-buffer the // procedure: // // 1 CPU copy -> pinned // 2 pinned copy -> GPU // 3 GPU compute // // 1 2 3 1 2 3 ... (pinned buf A) // 1 2 3 1 2 ... (pinned buf B) // 1 2 3 1 ... 
(pinned buf A) // time -> // auto defaultStream = resources_->getDefaultStream(device_); auto copyStream = resources_->getAsyncCopyStream(device_); FAISS_ASSERT((size_t) pageSizeInVecs * this->d <= (size_t) std::numeric_limits<int>::max()); float* bufPinnedA = (float*) pinnedAlloc.first; float* bufPinnedB = bufPinnedA + (size_t) pageSizeInVecs * this->d; float* bufPinned[2] = {bufPinnedA, bufPinnedB}; // Reserve space on the GPU for the destination of the pinned buffer // copy DeviceTensor<float, 2, true> bufGpuA( resources_->getMemoryManagerCurrentDevice(), {(int) pageSizeInVecs, (int) this->d}, defaultStream); DeviceTensor<float, 2, true> bufGpuB( resources_->getMemoryManagerCurrentDevice(), {(int) pageSizeInVecs, (int) this->d}, defaultStream); DeviceTensor<float, 2, true>* bufGpus[2] = {&bufGpuA, &bufGpuB}; // Copy completion events for the pinned buffers std::unique_ptr<CudaEvent> eventPinnedCopyDone[2]; // Execute completion events for the GPU buffers std::unique_ptr<CudaEvent> eventGpuExecuteDone[2]; // All offsets are in terms of number of vectors; they remain within // int bounds (as this function only handles max in vectors) // Current start offset for buffer 1 int cur1 = 0; int cur1BufIndex = 0; // Current start offset for buffer 2 int cur2 = -1; int cur2BufIndex = 0; // Current start offset for buffer 3 int cur3 = -1; int cur3BufIndex = 0; while (cur3 < n) { // Start async pinned -> GPU copy first (buf 2) if (cur2 != -1 && cur2 < n) { // Copy pinned to GPU int numToCopy = std::min(pageSizeInVecs, n - cur2); // Make sure any previous execution has completed before continuing auto& eventPrev = eventGpuExecuteDone[cur2BufIndex]; if (eventPrev.get()) { eventPrev->streamWaitOnEvent(copyStream); } CUDA_VERIFY(cudaMemcpyAsync(bufGpus[cur2BufIndex]->data(), bufPinned[cur2BufIndex], (size_t) numToCopy * this->d * sizeof(float), cudaMemcpyHostToDevice, copyStream)); // Mark a completion event in this stream eventPinnedCopyDone[cur2BufIndex] = std::move(std::unique_ptr<CudaEvent>(new CudaEvent(copyStream))); // We pick up from here cur3 = cur2; cur2 += numToCopy; cur2BufIndex = (cur2BufIndex == 0) ? 1 : 0; } if (cur3 != -1 && cur3 < n) { // Process on GPU int numToProcess = std::min(pageSizeInVecs, n - cur3); // Make sure the previous copy has completed before continuing auto& eventPrev = eventPinnedCopyDone[cur3BufIndex]; FAISS_ASSERT(eventPrev.get()); eventPrev->streamWaitOnEvent(defaultStream); // Create tensor wrappers // DeviceTensor<float, 2, true> input(bufGpus[cur3BufIndex]->data(), // {numToProcess, this->d}); auto outDistancesSlice = outDistances.narrowOutermost(cur3, numToProcess); auto outIndicesSlice = outIndices.narrowOutermost(cur3, numToProcess); searchImpl_(numToProcess, bufGpus[cur3BufIndex]->data(), k, outDistancesSlice.data(), outIndicesSlice.data(), bitset); // Create completion event eventGpuExecuteDone[cur3BufIndex] = std::move(std::unique_ptr<CudaEvent>(new CudaEvent(defaultStream))); // We pick up from here cur3BufIndex = (cur3BufIndex == 0) ? 1 : 0; cur3 += numToProcess; } if (cur1 < n) { // Copy CPU mem to CPU pinned int numToCopy = std::min(pageSizeInVecs, n - cur1); // Make sure any previous copy has completed before continuing auto& eventPrev = eventPinnedCopyDone[cur1BufIndex]; if (eventPrev.get()) { eventPrev->cpuWaitOnEvent(); } memcpy(bufPinned[cur1BufIndex], x + (size_t) cur1 * this->d, (size_t) numToCopy * this->d * sizeof(float)); // We pick up from here cur2 = cur1; cur1 += numToCopy; cur1BufIndex = (cur1BufIndex == 0) ? 
1 : 0; } } } void GpuIndex::compute_residual(const float* x, float* residual, Index::idx_t key) const { FAISS_THROW_MSG("compute_residual not implemented for this type of index"); } void GpuIndex::compute_residual_n(Index::idx_t n, const float* xs, float* residuals, const Index::idx_t* keys) const { FAISS_THROW_MSG("compute_residual_n not implemented for this type of index"); } } } // namespace
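The paged search above hides host-to-device transfer latency by triple-buffering: a CPU memcpy into pinned memory, an async pinned-to-GPU copy on a copy stream, and kernel work on the default stream, with events gating when each buffer may be reused. The standalone sketch below is an editorial illustration of the same double-buffered copy/compute overlap with plain CUDA streams and events; it is not faiss code, and the names processOnGpu and pagedProcess are made up. It assumes devOut already has room for all total results.

#include <cuda_runtime.h>
#include <algorithm>
#include <cstring>
#include <vector>

__global__ void processOnGpu(const float* in, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = in[i] * 2.0f;   // stand-in for the real per-page work
}

void pagedProcess(const float* host, float* devOut, int total, int page) {
    float* pinned[2];
    float* devIn[2];
    cudaStream_t copyStream, computeStream;
    cudaEvent_t copied[2], computed[2];
    cudaStreamCreate(&copyStream);
    cudaStreamCreate(&computeStream);
    for (int b = 0; b < 2; ++b) {
        cudaMallocHost((void**)&pinned[b], page * sizeof(float));
        cudaMalloc((void**)&devIn[b], page * sizeof(float));
        cudaEventCreate(&copied[b]);
        cudaEventCreate(&computed[b]);
        cudaEventRecord(computed[b]);          // mark both buffers as initially free
    }
    for (int cur = 0, b = 0; cur < total; cur += page, b ^= 1) {
        int num = std::min(page, total - cur);
        cudaEventSynchronize(computed[b]);     // buffer b is no longer read by a kernel
        memcpy(pinned[b], host + cur, num * sizeof(float));            // stage 1: CPU -> pinned
        cudaMemcpyAsync(devIn[b], pinned[b], num * sizeof(float),
                        cudaMemcpyHostToDevice, copyStream);           // stage 2: pinned -> GPU
        cudaEventRecord(copied[b], copyStream);
        cudaStreamWaitEvent(computeStream, copied[b], 0);              // compute waits on the copy
        processOnGpu<<<(num + 255) / 256, 256, 0, computeStream>>>(    // stage 3: GPU compute
            devIn[b], devOut + cur, num);
        cudaEventRecord(computed[b], computeStream);
    }
    cudaStreamSynchronize(computeStream);
    // event/stream/buffer cleanup omitted for brevity
}

int main() {
    const int total = 1 << 20, page = 1 << 18;
    std::vector<float> host(total, 1.0f);
    float* devOut = nullptr;
    cudaMalloc((void**)&devOut, total * sizeof(float));
    pagedProcess(host.data(), devOut, total, page);
    cudaFree(devOut);
    return 0;
}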
e4c339febc27fb2562aab78fd6ffbc319a7c31f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // TODO: reduce the apparent redundancy of all the code below. #include <cfloat> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/pool_op.h" namespace caffe2 { namespace { class LpPool {}; } // namespace namespace { template <typename T> inline __device__ T cuda_pow(T x, T y); template <typename T> inline __device__ T cuda_abs(T x); template <> inline __device__ float cuda_pow<float>(float x, float y) { return powf(x, y); } template <> inline __device__ double cuda_pow<double>(double x, double y) { return pow(x, y); } template <> inline __device__ float cuda_abs(float x) { return fabsf(x); } template <> inline __device__ double cuda_abs(double x) { return fabs(x); } } namespace { template <typename T> __global__ void LpPoolForwardNCHW( const int nthreads, const T* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data, const T p) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int pw = n % pooled_width; n /= pooled_width; int ph = n % pooled_height; n /= pooled_height; int c = n % channels; n /= channels; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); top_data[index] = 0; int bottom_offset = (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { top_data[index] += cuda_pow<T>( cuda_abs(bottom_data[bottom_offset + h * width + w]), p); } } top_data[index] = cuda_pow<T>(top_data[index], 1.0 / p); } } template <typename T> __global__ void LpPoolForwardNHWC( const int nthreads, const T* bottom_data, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data, const T p) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int c = index % channels; int pw = (index / channels) % pooled_width; int ph = (index / channels / pooled_width) % pooled_height; int n = index / channels / pooled_width / pooled_height; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); T output = 0; int bottom_offset = n * height * width * channels + c; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { output += cuda_pow<T>( cuda_abs(bottom_data[bottom_offset + (h * width + w) * channels]), p); } } top_data[index] = cuda_pow<T>(output, 1.0 / p); } } template <typename T> __global__ void LpPoolBackwardNCHW( const int nthreads, const T* const top_diff, const T* const top_data, const T* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff, const int p) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_l; const int h = (index / width) % height + pad_t; const int 
c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); T gradient = 0; const T* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; const T* const top_data_slice = top_data + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); gradient += top_diff_slice[ph * pooled_width + pw] * bottom_data[index] * cuda_pow<T>(cuda_abs(bottom_data[index]), p - 2) / cuda_pow<T>(top_data_slice[ph * pooled_width + pw], p - 1); } } bottom_diff[index] = gradient; } } template <typename T> __global__ void LpPoolBackwardNHWC( const int nthreads, const T* const top_diff, const T* const top_data, const T* const bottom_data, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff, const T p) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int w = index / channels % width + pad_l; const int h = (index / channels / width) % height + pad_t; const int n = index / channels / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); T gradient = 0; const T* const top_diff_slice = top_diff + n * pooled_height * pooled_width * channels + c; const T* const top_data_slice = top_data + n * pooled_height * pooled_width * channels + c; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); gradient += top_diff_slice[(ph * pooled_width + pw) * channels] * bottom_data[index] * cuda_pow<T>(cuda_abs(bottom_data[index]), p - 2) / cuda_pow<T>(top_data_slice[(ph * pooled_width + pw) * channels], p - 1); } } bottom_diff[index] = gradient; } } } // namespace template <> bool PoolOp<float, CUDAContext, LpPool>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto* Y = Output(0); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(1)); int output_size = Y->size(); hipLaunchKernelGGL(( LpPoolForwardNCHW<float>), dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->mutable_data<float>(), OperatorBase::GetSingleArgument<float>("p", 2.0)); return true; } template <> bool PoolOp<float, CUDAContext, LpPool>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto* Y = Output(0); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(3)); int output_size = Y->size(); hipLaunchKernelGGL(( LpPoolForwardNHWC<float>), dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(1), Y->dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->mutable_data<float>(), OperatorBase::GetSingleArgument<float>("p", 2.0)); return true; } template <> bool PoolGradientOp<float, CUDAContext, LpPool>:: RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.ndim(), 4); auto* dX = Output(0); dX->ResizeLike(X); ConvPoolOpBase<CUDAContext>::ComputePads({X.dim32(2), X.dim32(3)}); hipLaunchKernelGGL(( LpPoolBackwardNCHW<float>), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<float>(), Y.data<float>(), X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->mutable_data<float>(), OperatorBase::GetSingleArgument<float>("p", 2.0)); return true; } template <> bool PoolGradientOp<float, CUDAContext, LpPool>:: RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.ndim(), 4); auto* dX = Output(0); dX->ResizeLike(X); ConvPoolOpBase<CUDAContext>::ComputePads({X.dim32(1), X.dim32(2)}); hipLaunchKernelGGL(( LpPoolBackwardNHWC<float>), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<float>(), Y.data<float>(), X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(1), dY.dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->mutable_data<float>(), OperatorBase::GetSingleArgument<float>("p", 
2.0)); return true; } namespace { REGISTER_CUDA_OPERATOR(LpPool, PoolOp<float, CUDAContext, LpPool>); REGISTER_CUDA_OPERATOR( LpPoolGradient, PoolGradientOp<float, CUDAContext, LpPool>); } }
e4c339febc27fb2562aab78fd6ffbc319a7c31f9.cu
// TODO: reduce the apparent redundancy of all the code below. #include <cfloat> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/pool_op.h" namespace caffe2 { namespace { class LpPool {}; } // namespace namespace { template <typename T> inline __device__ T cuda_pow(T x, T y); template <typename T> inline __device__ T cuda_abs(T x); template <> inline __device__ float cuda_pow<float>(float x, float y) { return powf(x, y); } template <> inline __device__ double cuda_pow<double>(double x, double y) { return pow(x, y); } template <> inline __device__ float cuda_abs(float x) { return fabsf(x); } template <> inline __device__ double cuda_abs(double x) { return fabs(x); } } namespace { template <typename T> __global__ void LpPoolForwardNCHW( const int nthreads, const T* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data, const T p) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int pw = n % pooled_width; n /= pooled_width; int ph = n % pooled_height; n /= pooled_height; int c = n % channels; n /= channels; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); top_data[index] = 0; int bottom_offset = (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { top_data[index] += cuda_pow<T>( cuda_abs(bottom_data[bottom_offset + h * width + w]), p); } } top_data[index] = cuda_pow<T>(top_data[index], 1.0 / p); } } template <typename T> __global__ void LpPoolForwardNHWC( const int nthreads, const T* bottom_data, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data, const T p) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int c = index % channels; int pw = (index / channels) % pooled_width; int ph = (index / channels / pooled_width) % pooled_height; int n = index / channels / pooled_width / pooled_height; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); T output = 0; int bottom_offset = n * height * width * channels + c; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { output += cuda_pow<T>( cuda_abs(bottom_data[bottom_offset + (h * width + w) * channels]), p); } } top_data[index] = cuda_pow<T>(output, 1.0 / p); } } template <typename T> __global__ void LpPoolBackwardNCHW( const int nthreads, const T* const top_diff, const T* const top_data, const T* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff, const int p) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_l; const int h = (index / width) % height + pad_t; const int c = (index / width / height) % channels; const int n = index / width / height / 
channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); T gradient = 0; const T* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; const T* const top_data_slice = top_data + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); gradient += top_diff_slice[ph * pooled_width + pw] * bottom_data[index] * cuda_pow<T>(cuda_abs(bottom_data[index]), p - 2) / cuda_pow<T>(top_data_slice[ph * pooled_width + pw], p - 1); } } bottom_diff[index] = gradient; } } template <typename T> __global__ void LpPoolBackwardNHWC( const int nthreads, const T* const top_diff, const T* const top_data, const T* const bottom_data, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff, const T p) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int w = index / channels % width + pad_l; const int h = (index / channels / width) % height + pad_t; const int n = index / channels / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); T gradient = 0; const T* const top_diff_slice = top_diff + n * pooled_height * pooled_width * channels + c; const T* const top_data_slice = top_data + n * pooled_height * pooled_width * channels + c; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); gradient += top_diff_slice[(ph * pooled_width + pw) * channels] * bottom_data[index] * cuda_pow<T>(cuda_abs(bottom_data[index]), p - 2) / cuda_pow<T>(top_data_slice[(ph * pooled_width + pw) * channels], p - 1); } } bottom_diff[index] = gradient; } } } // namespace template <> bool PoolOp<float, CUDAContext, LpPool>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto* Y = Output(0); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(1)); int output_size = Y->size(); LpPoolForwardNCHW<float><<< CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->mutable_data<float>(), OperatorBase::GetSingleArgument<float>("p", 2.0)); return true; } template <> bool PoolOp<float, CUDAContext, LpPool>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto* Y = Output(0); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(3)); int output_size = Y->size(); LpPoolForwardNHWC<float><<< CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(1), Y->dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->mutable_data<float>(), OperatorBase::GetSingleArgument<float>("p", 2.0)); return true; } template <> bool PoolGradientOp<float, CUDAContext, LpPool>:: RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.ndim(), 4); auto* dX = Output(0); dX->ResizeLike(X); ConvPoolOpBase<CUDAContext>::ComputePads({X.dim32(2), X.dim32(3)}); LpPoolBackwardNCHW<float><<< CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<float>(), Y.data<float>(), X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->mutable_data<float>(), OperatorBase::GetSingleArgument<float>("p", 2.0)); return true; } template <> bool PoolGradientOp<float, CUDAContext, LpPool>:: RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.ndim(), 4); auto* dX = Output(0); dX->ResizeLike(X); ConvPoolOpBase<CUDAContext>::ComputePads({X.dim32(1), X.dim32(2)}); LpPoolBackwardNHWC<float><<< CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<float>(), Y.data<float>(), X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(1), dY.dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->mutable_data<float>(), OperatorBase::GetSingleArgument<float>("p", 2.0)); return true; } namespace { REGISTER_CUDA_OPERATOR(LpPool, PoolOp<float, CUDAContext, LpPool>); 
REGISTER_CUDA_OPERATOR( LpPoolGradient, PoolGradientOp<float, CUDAContext, LpPool>); } }
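For reference, a minimal CPU sketch (an editorial addition, not part of the Caffe2 operator) of the quantity the forward kernels above compute per pooling window, out = (sum |x|^p)^(1/p), here for a 1-D signal; lp_pool_1d and its arguments are illustrative only.

#include <cmath>
#include <cstdio>

// Reference Lp pooling over a 1-D signal: one output per window position.
void lp_pool_1d(const float* x, int n, int kernel, int stride, float p, float* y) {
    int out = (n - kernel) / stride + 1;
    for (int o = 0; o < out; ++o) {
        float acc = 0.f;
        for (int k = 0; k < kernel; ++k)
            acc += std::pow(std::fabs(x[o * stride + k]), p);
        y[o] = std::pow(acc, 1.f / p);   // p = 2 gives the familiar L2 pooling
    }
}

int main() {
    float x[6] = {1, -2, 3, -4, 5, -6}, y[3];
    lp_pool_1d(x, 6, 2, 2, 2.0f, y);
    for (float v : y) std::printf("%f\n", v);   // sqrt(1+4), sqrt(9+16), sqrt(25+36)
    return 0;
}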
010fe82450d9411cad83c872e5c1d1186ed579af.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define Width 32 // size of Width x Width matrix #define TILE_WIDTH 16 __global__ void MatrixMulKernel (float* Md, float* Nd, float* Pd){ __shared__ float shared_A[TILE_WIDTH][TILE_WIDTH]; __shared__ float shared_B[TILE_WIDTH][TILE_WIDTH]; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; //Pvalue is used to store the element of the output matrix // that is computed by the thread float Pvalue = 0; for (int k = 0; k < Width/TILE_WIDTH; k++){ // y = row , x = col shared_A[threadIdx.y][threadIdx.x] = Md[row*Width+(k * TILE_WIDTH + threadIdx.x)]; shared_B[threadIdx.y][threadIdx.x] = Nd[(k * TILE_WIDTH + threadIdx.y)*Width+col]; __syncthreads(); // similar to barrier for (int x = 0; x < TILE_WIDTH; x++){ Pvalue += shared_A[threadIdx.y][x] * shared_B[x][threadIdx.x]; } __syncthreads(); // similar to barrier } Pd[row*Width+col] = Pvalue; } int main (int argc, char *argv[]){ int i,j; int size = Width * Width * sizeof(float); float M[Width][Width], N[Width][Width], P[Width][Width]; float* Md, *Nd, *Pd; for (i = 0; i < Width; i++){ for (j = 0; j < Width; j++){ M[i][j] = 1; N[i][j] = 2; } } hipMalloc( (void**)&Md, size); hipMalloc( (void**)&Nd, size); hipMalloc( (void**)&Pd, size); hipMemcpy( Md, M, size, hipMemcpyHostToDevice); hipMemcpy( Nd, N, size, hipMemcpyHostToDevice); //Setup the execution configuration dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(Width/TILE_WIDTH, Width/TILE_WIDTH); //Launch the device computation threads! hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd); //Read P from the device hipMemcpy( P, Pd, size, hipMemcpyDeviceToHost); //Free device matrices hipFree( Md); hipFree( Nd); hipFree( Pd); for (i = 0; i < Width; i++){ for (j = 0; j < Width; j++){ printf("%.2f ",P[i][j]); } printf("\n"); } }
010fe82450d9411cad83c872e5c1d1186ed579af.cu
#include <stdio.h> #define Width 32 // size of Width x Width matrix #define TILE_WIDTH 16 __global__ void MatrixMulKernel (float* Md, float* Nd, float* Pd){ __shared__ float shared_A[TILE_WIDTH][TILE_WIDTH]; __shared__ float shared_B[TILE_WIDTH][TILE_WIDTH]; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; //Pvalue is used to store the element of the output matrix // that is computed by the thread float Pvalue = 0; for (int k = 0; k < Width/TILE_WIDTH; k++){ // y = row , x = col shared_A[threadIdx.y][threadIdx.x] = Md[row*Width+(k * TILE_WIDTH + threadIdx.x)]; shared_B[threadIdx.y][threadIdx.x] = Nd[(k * TILE_WIDTH + threadIdx.y)*Width+col]; __syncthreads(); // similar to barrier for (int x = 0; x < TILE_WIDTH; x++){ Pvalue += shared_A[threadIdx.y][x] * shared_B[x][threadIdx.x]; } __syncthreads(); // similar to barrier } Pd[row*Width+col] = Pvalue; } int main (int argc, char *argv[]){ int i,j; int size = Width * Width * sizeof(float); float M[Width][Width], N[Width][Width], P[Width][Width]; float* Md, *Nd, *Pd; for (i = 0; i < Width; i++){ for (j = 0; j < Width; j++){ M[i][j] = 1; N[i][j] = 2; } } cudaMalloc( (void**)&Md, size); cudaMalloc( (void**)&Nd, size); cudaMalloc( (void**)&Pd, size); cudaMemcpy( Md, M, size, cudaMemcpyHostToDevice); cudaMemcpy( Nd, N, size, cudaMemcpyHostToDevice); //Setup the execution configuration dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(Width/TILE_WIDTH, Width/TILE_WIDTH); //Launch the device computation threads! MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd); //Read P from the device cudaMemcpy( P, Pd, size, cudaMemcpyDeviceToHost); //Free device matrices cudaFree( Md); cudaFree( Nd); cudaFree( Pd); for (i = 0; i < Width; i++){ for (j = 0; j < Width; j++){ printf("%.2f ",P[i][j]); } printf("\n"); } }
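A small host-side reference (a hypothetical helper, not in the original file) that could be used to validate the tiled-kernel output above: with M filled with 1s and N filled with 2s, every element of P should come out as 2*Width = 64.

#include <cstdio>
#include <vector>

// Plain CPU multiply for checking the GPU result.
void matmul_ref(const float* M, const float* N, float* P, int w) {
    for (int i = 0; i < w; ++i)
        for (int j = 0; j < w; ++j) {
            float s = 0.f;
            for (int k = 0; k < w; ++k) s += M[i * w + k] * N[k * w + j];
            P[i * w + j] = s;
        }
}

int main() {
    const int w = 32;   // matches Width in the file above
    std::vector<float> M(w * w, 1.f), N(w * w, 2.f), P(w * w);
    matmul_ref(M.data(), N.data(), P.data(), w);
    std::printf("P[0][0] = %.2f (expected 2*Width = 64)\n", P[0]);
    return 0;
}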
eeda9936bb79595f55a733afe35a7217015d3d82.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/cross_entropy_op.h" #include "caffe2/operators/operator_fallback_gpu.h" namespace caffe2 { namespace { __global__ void LabelCrossEntropyKernel( const int N, const int D, const float* Xdata, const int* labeldata, const float log_threshold, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D); Ydata[i] = -logf(max(Xdata[i * D + labeldata[i]], log_threshold)); } } __global__ void LabelCrossEntropyGradientKernel( const int N, const int D, const float* Xdata, const int* labeldata, const float* dYdata, const float log_threshold, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { int idx = i * D + labeldata[i]; dXdata[idx] = - dYdata[i] / max(Xdata[idx], log_threshold); } } } // namespace template <> bool LabelCrossEntropyOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& label = Input(1); auto* Y = Output(0); int N, D; if (X.ndim() > 1) { N = X.dim32(0); D = X.size_from_dim(1); } else { N = 1; D = X.dim32(0); } CAFFE_ENFORCE( (label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1)); CAFFE_ENFORCE_EQ(label.dim32(0), N); Y->Resize(vector<TIndex>(size_t(1), N)); hipLaunchKernelGGL(( LabelCrossEntropyKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, X.data<float>(), label.data<int>(), kLOG_THRESHOLD(), Y->mutable_data<float>()); return true; } template <> bool LabelCrossEntropyGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& label = Input(1); auto& dY = Input(2); auto* dX = Output(0); int N, D; if (X.ndim() > 1) { N = X.dim32(0); D = X.size_from_dim(1); } else { N = 1; D = X.dim32(0); } CAFFE_ENFORCE( (label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1)); CAFFE_ENFORCE_EQ(label.dim32(0), N); CAFFE_ENFORCE_EQ(dY.ndim(), 1); CAFFE_ENFORCE_EQ(dY.dim32(0), N); dX->ResizeLike(X); math::Set<float, CUDAContext>( dX->size(), 0.f, dX->mutable_data<float>(), &context_); hipLaunchKernelGGL(( LabelCrossEntropyGradientKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, X.data<float>(), label.data<int>(), dY.data<float>(), kLOG_THRESHOLD(), dX->mutable_data<float>()); return true; } namespace { __global__ void MakeTwoClassKernel( const int N, const float* Xdata, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { Ydata[i * 2] = 1.0 - Xdata[i]; Ydata[i * 2 + 1] = Xdata[i]; } } __global__ void MakeTwoClassGradientKernel( const int N, const float* dYdata, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { dXdata[i] = dYdata[i * 2 + 1] - dYdata[i * 2]; } } } // namespace template <> bool MakeTwoClassOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); auto shape = X.dims(); shape.push_back(2); CAFFE_ENFORCE_LT(X.size(), std::numeric_limits<int>::max() / 2); Y->Resize(shape); int N = X.size(); hipLaunchKernelGGL(( MakeTwoClassKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, X.data<float>(), Y->mutable_data<float>()); return true; } template <> bool MakeTwoClassGradientOp<float, CUDAContext>::RunOnDevice() { auto& dY = Input(0); auto* dX = Output(0); auto shape = dY.dims(); CAFFE_ENFORCE_GE(shape.size(), 1); CAFFE_ENFORCE_EQ(shape.back(), 2); shape.pop_back(); CAFFE_ENFORCE_LT(dY.size(), std::numeric_limits<int>::max()); dX->Resize(shape); int N = dX->size(); 
hipLaunchKernelGGL(( MakeTwoClassGradientKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, dY.data<float>(), dX->mutable_data<float>()); return true; } namespace { __device__ float sigmoid_xent_forward(float lgt, float tgt) { return lgt * (tgt - (lgt >= 0)) - log(1 + exp(lgt - 2 * lgt * (lgt >= 0))); } __device__ float sigmoid_xent_backward(float lgt, float tgt) { return tgt - 1. / (1. + exp(-lgt)); } __global__ void SigmoidCrossEntropyWithLogitsKernel( const int outer_size, const int inner_size, const float* logits_ptr, const float* targets_ptr, float* out_ptr) { int i = blockIdx.x; int last_idx = (i + 1) * inner_size; float value = 0; for (int in_idx = i * inner_size + threadIdx.x; in_idx < last_idx; in_idx += blockDim.x) { value += sigmoid_xent_forward(logits_ptr[in_idx], targets_ptr[in_idx]); } typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; float sum = BlockReduce(temp_storage).Sum(value); if (threadIdx.x == 0) { out_ptr[i] = -sum / inner_size; } } __global__ void SigmoidCrossEntropyGradientWithLogitsKernel( const int outer_size, const int inner_size, const float* g_ptr, const float* logits_ptr, const float* targets_ptr, float* out_ptr) { CUDA_1D_KERNEL_LOOP(in_idx, outer_size * inner_size) { int i = in_idx / inner_size; auto g_factor = -g_ptr[i] / inner_size; out_ptr[in_idx] = g_factor * sigmoid_xent_backward(logits_ptr[in_idx], targets_ptr[in_idx]); } } } // namespace template <> bool SigmoidCrossEntropyWithLogitsOp<float, CUDAContext>::RunOnDevice() { auto& logits = Input(0); auto& targets = Input(1); CAFFE_ENFORCE(logits.dims() == targets.dims()); const auto inner_size = logits.ndim() > 0 ? logits.dims().back() : 1; const auto outer_size = logits.size() / inner_size; auto* out = Output(0); if (logits.ndim() == 0) { out->Resize(std::vector<TIndex>{}); } else { std::vector<TIndex> dims(logits.dims().begin(), logits.dims().end() - 1); out->Resize(dims); } auto* out_ptr = out->mutable_data<float>(); auto* logits_ptr = logits.data<float>(); auto* targets_ptr = targets.data<float>(); hipLaunchKernelGGL(( SigmoidCrossEntropyWithLogitsKernel), dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), outer_size, inner_size, logits_ptr, targets_ptr, out_ptr); return true; } template <> bool SigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>:: RunOnDevice() { auto& g = Input(0); auto& logits = Input(1); auto& targets = Input(2); CAFFE_ENFORCE(logits.dims() == targets.dims()); const auto inner_size = logits.ndim() > 0 ? 
logits.dims().back() : 1; const auto outer_size = logits.size() / inner_size; CAFFE_ENFORCE(g.size() == outer_size); auto* out = Output(0); out->ResizeLike(logits); auto* out_ptr = out->mutable_data<float>(); auto* logits_ptr = logits.data<float>(); auto* targets_ptr = targets.data<float>(); auto* g_ptr = g.data<float>(); hipLaunchKernelGGL(( SigmoidCrossEntropyGradientWithLogitsKernel), dim3(CAFFE_GET_BLOCKS(outer_size * inner_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), outer_size, inner_size, g_ptr, logits_ptr, targets_ptr, out_ptr); return true; } REGISTER_CUDA_OPERATOR(LabelCrossEntropy, LabelCrossEntropyOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(LabelCrossEntropyGradient, LabelCrossEntropyGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SigmoidCrossEntropyWithLogits, SigmoidCrossEntropyWithLogitsOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SigmoidCrossEntropyWithLogitsGradient, SigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MakeTwoClass, MakeTwoClassOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MakeTwoClassGradient, MakeTwoClassGradientOp<float, CUDAContext>); //TODO(surya) Add full GPU/CUDA support for the CrossEntropyOp REGISTER_CUDA_OPERATOR(CrossEntropy, GPUFallbackOp<CrossEntropyOp<float, CPUContext>>); REGISTER_CUDA_OPERATOR(CrossEntropyGradient, GPUFallbackOp<CrossEntropyGradientOp<float, CPUContext>>); } // namespace caffe2
eeda9936bb79595f55a733afe35a7217015d3d82.cu
#include <assert.h> #include <cub/block/block_reduce.cuh> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/cross_entropy_op.h" #include "caffe2/operators/operator_fallback_gpu.h" namespace caffe2 { namespace { __global__ void LabelCrossEntropyKernel( const int N, const int D, const float* Xdata, const int* labeldata, const float log_threshold, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D); Ydata[i] = -logf(max(Xdata[i * D + labeldata[i]], log_threshold)); } } __global__ void LabelCrossEntropyGradientKernel( const int N, const int D, const float* Xdata, const int* labeldata, const float* dYdata, const float log_threshold, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { int idx = i * D + labeldata[i]; dXdata[idx] = - dYdata[i] / max(Xdata[idx], log_threshold); } } } // namespace template <> bool LabelCrossEntropyOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& label = Input(1); auto* Y = Output(0); int N, D; if (X.ndim() > 1) { N = X.dim32(0); D = X.size_from_dim(1); } else { N = 1; D = X.dim32(0); } CAFFE_ENFORCE( (label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1)); CAFFE_ENFORCE_EQ(label.dim32(0), N); Y->Resize(vector<TIndex>(size_t(1), N)); LabelCrossEntropyKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, X.data<float>(), label.data<int>(), kLOG_THRESHOLD(), Y->mutable_data<float>()); return true; } template <> bool LabelCrossEntropyGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& label = Input(1); auto& dY = Input(2); auto* dX = Output(0); int N, D; if (X.ndim() > 1) { N = X.dim32(0); D = X.size_from_dim(1); } else { N = 1; D = X.dim32(0); } CAFFE_ENFORCE( (label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1)); CAFFE_ENFORCE_EQ(label.dim32(0), N); CAFFE_ENFORCE_EQ(dY.ndim(), 1); CAFFE_ENFORCE_EQ(dY.dim32(0), N); dX->ResizeLike(X); math::Set<float, CUDAContext>( dX->size(), 0.f, dX->mutable_data<float>(), &context_); LabelCrossEntropyGradientKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, X.data<float>(), label.data<int>(), dY.data<float>(), kLOG_THRESHOLD(), dX->mutable_data<float>()); return true; } namespace { __global__ void MakeTwoClassKernel( const int N, const float* Xdata, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { Ydata[i * 2] = 1.0 - Xdata[i]; Ydata[i * 2 + 1] = Xdata[i]; } } __global__ void MakeTwoClassGradientKernel( const int N, const float* dYdata, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { dXdata[i] = dYdata[i * 2 + 1] - dYdata[i * 2]; } } } // namespace template <> bool MakeTwoClassOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); auto shape = X.dims(); shape.push_back(2); CAFFE_ENFORCE_LT(X.size(), std::numeric_limits<int>::max() / 2); Y->Resize(shape); int N = X.size(); MakeTwoClassKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, X.data<float>(), Y->mutable_data<float>()); return true; } template <> bool MakeTwoClassGradientOp<float, CUDAContext>::RunOnDevice() { auto& dY = Input(0); auto* dX = Output(0); auto shape = dY.dims(); CAFFE_ENFORCE_GE(shape.size(), 1); CAFFE_ENFORCE_EQ(shape.back(), 2); shape.pop_back(); CAFFE_ENFORCE_LT(dY.size(), std::numeric_limits<int>::max()); dX->Resize(shape); int N = dX->size(); MakeTwoClassGradientKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, dY.data<float>(), dX->mutable_data<float>()); return true; } 
namespace { __device__ float sigmoid_xent_forward(float lgt, float tgt) { return lgt * (tgt - (lgt >= 0)) - log(1 + exp(lgt - 2 * lgt * (lgt >= 0))); } __device__ float sigmoid_xent_backward(float lgt, float tgt) { return tgt - 1. / (1. + exp(-lgt)); } __global__ void SigmoidCrossEntropyWithLogitsKernel( const int outer_size, const int inner_size, const float* logits_ptr, const float* targets_ptr, float* out_ptr) { int i = blockIdx.x; int last_idx = (i + 1) * inner_size; float value = 0; for (int in_idx = i * inner_size + threadIdx.x; in_idx < last_idx; in_idx += blockDim.x) { value += sigmoid_xent_forward(logits_ptr[in_idx], targets_ptr[in_idx]); } typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; float sum = BlockReduce(temp_storage).Sum(value); if (threadIdx.x == 0) { out_ptr[i] = -sum / inner_size; } } __global__ void SigmoidCrossEntropyGradientWithLogitsKernel( const int outer_size, const int inner_size, const float* g_ptr, const float* logits_ptr, const float* targets_ptr, float* out_ptr) { CUDA_1D_KERNEL_LOOP(in_idx, outer_size * inner_size) { int i = in_idx / inner_size; auto g_factor = -g_ptr[i] / inner_size; out_ptr[in_idx] = g_factor * sigmoid_xent_backward(logits_ptr[in_idx], targets_ptr[in_idx]); } } } // namespace template <> bool SigmoidCrossEntropyWithLogitsOp<float, CUDAContext>::RunOnDevice() { auto& logits = Input(0); auto& targets = Input(1); CAFFE_ENFORCE(logits.dims() == targets.dims()); const auto inner_size = logits.ndim() > 0 ? logits.dims().back() : 1; const auto outer_size = logits.size() / inner_size; auto* out = Output(0); if (logits.ndim() == 0) { out->Resize(std::vector<TIndex>{}); } else { std::vector<TIndex> dims(logits.dims().begin(), logits.dims().end() - 1); out->Resize(dims); } auto* out_ptr = out->mutable_data<float>(); auto* logits_ptr = logits.data<float>(); auto* targets_ptr = targets.data<float>(); SigmoidCrossEntropyWithLogitsKernel<<< outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( outer_size, inner_size, logits_ptr, targets_ptr, out_ptr); return true; } template <> bool SigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>:: RunOnDevice() { auto& g = Input(0); auto& logits = Input(1); auto& targets = Input(2); CAFFE_ENFORCE(logits.dims() == targets.dims()); const auto inner_size = logits.ndim() > 0 ? 
logits.dims().back() : 1; const auto outer_size = logits.size() / inner_size; CAFFE_ENFORCE(g.size() == outer_size); auto* out = Output(0); out->ResizeLike(logits); auto* out_ptr = out->mutable_data<float>(); auto* logits_ptr = logits.data<float>(); auto* targets_ptr = targets.data<float>(); auto* g_ptr = g.data<float>(); SigmoidCrossEntropyGradientWithLogitsKernel<<< CAFFE_GET_BLOCKS(outer_size * inner_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( outer_size, inner_size, g_ptr, logits_ptr, targets_ptr, out_ptr); return true; } REGISTER_CUDA_OPERATOR(LabelCrossEntropy, LabelCrossEntropyOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(LabelCrossEntropyGradient, LabelCrossEntropyGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SigmoidCrossEntropyWithLogits, SigmoidCrossEntropyWithLogitsOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SigmoidCrossEntropyWithLogitsGradient, SigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MakeTwoClass, MakeTwoClassOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MakeTwoClassGradient, MakeTwoClassGradientOp<float, CUDAContext>); //TODO(surya) Add full GPU/CUDA support for the CrossEntropyOp REGISTER_CUDA_OPERATOR(CrossEntropy, GPUFallbackOp<CrossEntropyOp<float, CPUContext>>); REGISTER_CUDA_OPERATOR(CrossEntropyGradient, GPUFallbackOp<CrossEntropyGradientOp<float, CPUContext>>); } // namespace caffe2
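The sigmoid_xent_forward device function above is the numerically stable form of the per-element sigmoid cross entropy. The short CPU sketch below (illustrative only, not Caffe2 code) compares it against the naive tgt*log(sigmoid) + (1-tgt)*log(1-sigmoid) form: the two agree for moderate logits, while the naive form hits log(0) / NaN for large magnitudes.

#include <cmath>
#include <cstdio>

// Stable form (as in the kernel): lgt*(tgt - (lgt>=0)) - log(1 + exp(lgt - 2*lgt*(lgt>=0)))
double stable_xent(double lgt, double tgt) {
    double ind = lgt >= 0 ? 1.0 : 0.0;
    return lgt * (tgt - ind) - std::log(1.0 + std::exp(lgt - 2.0 * lgt * ind));
}

// Naive form: overflows exp() or hits log(0) once |lgt| gets large.
double naive_xent(double lgt, double tgt) {
    double s = 1.0 / (1.0 + std::exp(-lgt));
    return tgt * std::log(s) + (1.0 - tgt) * std::log(1.0 - s);
}

int main() {
    std::printf("%f vs %f\n", stable_xent(3.0, 1.0), naive_xent(3.0, 1.0));     // agree
    std::printf("%f vs %f\n", stable_xent(-2.5, 0.0), naive_xent(-2.5, 0.0));   // agree
    std::printf("%f vs %f\n", stable_xent(800.0, 1.0), naive_xent(800.0, 1.0)); // naive breaks down (NaN)
    return 0;
}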
32e0677d9c2b73e6fb0a438636e074fb0008eca8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/c_embedding_kernel.h" #include "glog/logging.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/embedding_grad.h" #include "paddle/utils/flags.h" PD_DECLARE_int64(embedding_deterministic); namespace phi { static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T, typename IndexT> __global__ void CEmbeddingGrad(T* table, const T* output, const IndexT* ids, const int rows, const int columns, const int64_t N, const int64_t start_idx, const int64_t end_idx, const int64_t limit) { CUDA_KERNEL_LOOP(i, limit) { size_t row = i / columns; size_t col = i % columns; auto id = ids[row]; if (id >= start_idx && id < end_idx) { auto real_idx = id - start_idx; phi::CudaAtomicAdd(&table[real_idx * columns + col], output[i]); } } } template <typename T, typename Context> void CEmbeddingGradKernel(const Context& dev_ctx, const DenseTensor& w, const DenseTensor& ids, const DenseTensor& out_grad, int64_t start_index, DenseTensor* w_grad) { int N = w_grad->dims()[0]; int D = w_grad->dims()[1]; int K = ids.numel(); auto limit = K * D; int blocks = NumBlocks(limit); int threads = kNumCUDAThreads; const T* d_output = out_grad.data<T>(); T* d_table = dev_ctx.template Alloc<T>(w_grad); auto t = EigenVector<T>::Flatten(*w_grad); t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0)); const auto& index_type = ids.dtype(); if (FLAGS_embedding_deterministic == 1) { if (index_type == phi::DataType::INT32) { phi::funcs::LaunchEmbeddingGradDeterministicKernel<T, int32_t>( dev_ctx, ids.data<int32_t>(), d_output, d_table, N, D, K, start_index); return; } else if (index_type == phi::DataType::INT64) { phi::funcs::LaunchEmbeddingGradDeterministicKernel<T, int64_t>( dev_ctx, ids.data<int64_t>(), d_output, d_table, N, D, K, start_index); return; } } else { if (FLAGS_embedding_deterministic > 1) { VLOG(2) << "Run grad kernel of embedding with single thread."; blocks = 1; } const int64_t end_idx = start_index + N; if (index_type == phi::DataType::INT32) { hipLaunchKernelGGL(( CEmbeddingGrad<T, int32_t>) , dim3(blocks), dim3(threads), 0, dev_ctx.stream(), d_table, d_output, ids.data<int32_t>(), K, D, N, start_index, end_idx, limit); return; } else if (index_type == phi::DataType::INT64) { hipLaunchKernelGGL(( CEmbeddingGrad<T, int64_t>) , dim3(blocks), dim3(threads), 0, dev_ctx.stream(), d_table, d_output, ids.data<int64_t>(), K, D, N, start_index, end_idx, limit); return; } } PADDLE_THROW(phi::errors::InvalidArgument( "The data type of 
Input(Ids) must be int32 or int64.")); } } // namespace phi #if NCCL_VERSION_CODE >= 21000 && TORCH_HIP_VERSION >= 11000 PD_REGISTER_KERNEL(c_embedding_grad, GPU, ALL_LAYOUT, phi::CEmbeddingGradKernel, float, double, phi::dtype::bfloat16, phi::dtype::float16) {} #else PD_REGISTER_KERNEL(c_embedding_grad, GPU, ALL_LAYOUT, phi::CEmbeddingGradKernel, float, double, phi::dtype::float16) {} #endif
32e0677d9c2b73e6fb0a438636e074fb0008eca8.cu
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/c_embedding_kernel.h" #include "glog/logging.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/embedding_grad.h" #include "paddle/utils/flags.h" PD_DECLARE_int64(embedding_deterministic); namespace phi { static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T, typename IndexT> __global__ void CEmbeddingGrad(T* table, const T* output, const IndexT* ids, const int rows, const int columns, const int64_t N, const int64_t start_idx, const int64_t end_idx, const int64_t limit) { CUDA_KERNEL_LOOP(i, limit) { size_t row = i / columns; size_t col = i % columns; auto id = ids[row]; if (id >= start_idx && id < end_idx) { auto real_idx = id - start_idx; phi::CudaAtomicAdd(&table[real_idx * columns + col], output[i]); } } } template <typename T, typename Context> void CEmbeddingGradKernel(const Context& dev_ctx, const DenseTensor& w, const DenseTensor& ids, const DenseTensor& out_grad, int64_t start_index, DenseTensor* w_grad) { int N = w_grad->dims()[0]; int D = w_grad->dims()[1]; int K = ids.numel(); auto limit = K * D; int blocks = NumBlocks(limit); int threads = kNumCUDAThreads; const T* d_output = out_grad.data<T>(); T* d_table = dev_ctx.template Alloc<T>(w_grad); auto t = EigenVector<T>::Flatten(*w_grad); t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0)); const auto& index_type = ids.dtype(); if (FLAGS_embedding_deterministic == 1) { if (index_type == phi::DataType::INT32) { phi::funcs::LaunchEmbeddingGradDeterministicKernel<T, int32_t>( dev_ctx, ids.data<int32_t>(), d_output, d_table, N, D, K, start_index); return; } else if (index_type == phi::DataType::INT64) { phi::funcs::LaunchEmbeddingGradDeterministicKernel<T, int64_t>( dev_ctx, ids.data<int64_t>(), d_output, d_table, N, D, K, start_index); return; } } else { if (FLAGS_embedding_deterministic > 1) { VLOG(2) << "Run grad kernel of embedding with single thread."; blocks = 1; } const int64_t end_idx = start_index + N; if (index_type == phi::DataType::INT32) { CEmbeddingGrad<T, int32_t> <<<blocks, threads, 0, dev_ctx.stream()>>>(d_table, d_output, ids.data<int32_t>(), K, D, N, start_index, end_idx, limit); return; } else if (index_type == phi::DataType::INT64) { CEmbeddingGrad<T, int64_t> <<<blocks, threads, 0, dev_ctx.stream()>>>(d_table, d_output, ids.data<int64_t>(), K, D, N, start_index, end_idx, limit); return; } } PADDLE_THROW(phi::errors::InvalidArgument( "The data type of Input(Ids) must be int32 or int64.")); } } // namespace phi #if NCCL_VERSION_CODE >= 21000 && CUDA_VERSION >= 11000 
PD_REGISTER_KERNEL(c_embedding_grad, GPU, ALL_LAYOUT, phi::CEmbeddingGradKernel, float, double, phi::dtype::bfloat16, phi::dtype::float16) {} #else PD_REGISTER_KERNEL(c_embedding_grad, GPU, ALL_LAYOUT, phi::CEmbeddingGradKernel, float, double, phi::dtype::float16) {} #endif
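The gradient kernel above only accumulates rows whose ids fall inside this shard's range [start_index, start_index + N), adding into the local table at row id - start_index via atomic adds. The small CPU sketch below illustrates the same rule; it is an editorial reference, not PaddlePaddle API.

#include <cstdint>
#include <cstdio>
#include <vector>

// Accumulate gradient rows into the local shard; ids outside [start, start+N) belong elsewhere.
void c_embedding_grad_ref(std::vector<float>& table,        // N x D local shard, zero-initialised
                          const std::vector<float>& dOut,   // K x D upstream gradient
                          const std::vector<int64_t>& ids,  // K global ids
                          int64_t N, int64_t D, int64_t start) {
    for (size_t k = 0; k < ids.size(); ++k) {
        int64_t id = ids[k];
        if (id < start || id >= start + N) continue;   // row lives on another shard
        for (int64_t d = 0; d < D; ++d)
            table[(id - start) * D + d] += dOut[k * D + d];
    }
}

int main() {
    const int64_t N = 4, D = 2, start = 10;             // this shard owns global rows 10..13
    std::vector<float> table(N * D, 0.f);
    std::vector<float> dOut = {1, 1, 2, 2, 3, 3};        // gradients for three looked-up ids
    std::vector<int64_t> ids = {11, 99, 13};             // id 99 is owned by another shard
    c_embedding_grad_ref(table, dOut, ids, N, D, start);
    std::printf("row 1: %.0f %.0f, row 3: %.0f %.0f\n",
                table[2], table[3], table[6], table[7]); // 1 1, 3 3
    return 0;
}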
16a4188b6d857349c4cd2ec8c0a5b0460e274293.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <glew.h>
#include <GLFW/glfw3.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include <helper_cuda.h>
#include <helper_math.h>
#include "line.cuh"
#include "lineShader.hpp"
#include "lib.hpp"

int LineStructSize = (int)sizeof(LineStruct);

__global__ void moveLines(LineStruct* line, LineFTStruct* ft, PointStruct* point, int n) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return;   // was `i > n`: the thread with i == n would read past the arrays
    line[i].pos.f = point[ ft[i].f ].pos;
    line[i].pos.t = point[ ft[i].t ].pos;
}

Line::Line(int _count) {
    this->count = _count;
    this->size = LineStructSize * _count;
    this->data = (LineStruct*)malloc(this->size);
    this->ft = (LineFTStruct*)malloc(sizeof(LineFTStruct) * _count);
    this->data[0].pos.f = {0, 0, 0};
    this->data[0].pos.t = {0.5, 0.5, 0};
    this->ft[0].f = 0;
    this->ft[0].t = 1;
    this->ftdptr = NULL;
    checkCudaErrors(hipMalloc((void **)&this->ftdptr, (int)sizeof(LineFTStruct) * _count));
    checkCudaErrors(hipMemcpy(this->ftdptr, this->ft, sizeof(LineFTStruct) * _count, hipMemcpyHostToDevice));
    // for (int i = 0; i < this->count; i++) {
    //     this->data[i].pos.f = {trand(), trand(), 0};
    //     this->data[i].pos.t = {trand(), trand(), 0};
    // }
    glGenBuffers(1, &this->VBO);
    this->dptr = NULL;
    this->lineShaderProgram = getLineShaderProgram();
    glBindBuffer(GL_ARRAY_BUFFER, this->VBO);
    glBufferData(GL_ARRAY_BUFFER, this->size, this->data, GL_DYNAMIC_DRAW);
    checkCudaErrors(hipGraphicsGLRegisterBuffer(&this->cuda_vbo_resource, this->VBO, hipGraphicsMapFlagsNone));
    glBindBuffer(GL_ARRAY_BUFFER, 0);
}

void Line::add(int f, int t) {
    this->count++;
    this->d2h();
    // but from here on I need to work with this->ft, not with this->data
    this->h2d();
}

void Line::bindVBO() {
    checkCudaErrors(hipGraphicsMapResources(1, &this->cuda_vbo_resource, 0));
    size_t num_bytes;
    checkCudaErrors(hipGraphicsResourceGetMappedPointer((void **)&this->dptr, &num_bytes, this->cuda_vbo_resource));
}

void Line::unbindVBO() {
    checkCudaErrors(hipGraphicsUnmapResources(1, &this->cuda_vbo_resource, 0));
}

void Point::d2h() {
    this->bindVBO();
    checkCudaErrors(hipMemcpy((void *)this->data, this->dptr, this->size(), hipMemcpyDeviceToHost));
    this->unbindVBO();
}

void Point::h2d() {
    this->bindVBO();
    checkCudaErrors(hipMemcpy(this->dptr, (void *)this->data, this->size(), hipMemcpyHostToDevice));
    this->unbindVBO();
}

void Line::draw() {
    glEnable(GL_LINE_SMOOTH);
    glUseProgram(this->lineShaderProgram);
    glLineWidth(2.0);
    //glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, LineStructSize, (GLvoid *)0);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (GLvoid *)0);
    //glVertexPointer(3, GL_FLOAT, LineStructSize, 0);
    glVertexPointer(3, GL_FLOAT, 0, 0);
    //glInterleavedArrays(GL_V2F, 0, NULL);
    glDrawArrays(GL_LINES, 0, this->count * 2);
    glDisable(GL_LINE_SMOOTH);
}

void Line::tick(Point* p1) {
    int THREADS_PER_BLOCK = 1024;
    // Round the grid size up so every line is covered; the original
    // `count % THREADS_PER_BLOCK` computation divides by zero whenever
    // count is a multiple of the block size.
    int threads = THREADS_PER_BLOCK;
    int blocks = (this->count + threads - 1) / threads;
    hipLaunchKernelGGL(( moveLines), dim3(blocks), dim3(threads), 0, 0, this->dptr, this->ftdptr, p1->dptr, this->count);
}
16a4188b6d857349c4cd2ec8c0a5b0460e274293.cu
#include <stdio.h>
#include <glew.h>
#include <GLFW/glfw3.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <helper_cuda.h>
#include <helper_math.h>
#include "line.cuh"
#include "lineShader.hpp"
#include "lib.hpp"

int LineStructSize = (int)sizeof(LineStruct);

__global__ void moveLines(LineStruct* line, LineFTStruct* ft, PointStruct* point, int n) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return;   // was `i > n`: the thread with i == n would read past the arrays
    line[i].pos.f = point[ ft[i].f ].pos;
    line[i].pos.t = point[ ft[i].t ].pos;
}

Line::Line(int _count) {
    this->count = _count;
    this->size = LineStructSize * _count;
    this->data = (LineStruct*)malloc(this->size);
    this->ft = (LineFTStruct*)malloc(sizeof(LineFTStruct) * _count);
    this->data[0].pos.f = {0, 0, 0};
    this->data[0].pos.t = {0.5, 0.5, 0};
    this->ft[0].f = 0;
    this->ft[0].t = 1;
    this->ftdptr = NULL;
    checkCudaErrors(cudaMalloc((void **)&this->ftdptr, (int)sizeof(LineFTStruct) * _count));
    checkCudaErrors(cudaMemcpy(this->ftdptr, this->ft, sizeof(LineFTStruct) * _count, cudaMemcpyHostToDevice));
    // for (int i = 0; i < this->count; i++) {
    //     this->data[i].pos.f = {trand(), trand(), 0};
    //     this->data[i].pos.t = {trand(), trand(), 0};
    // }
    glGenBuffers(1, &this->VBO);
    this->dptr = NULL;
    this->lineShaderProgram = getLineShaderProgram();
    glBindBuffer(GL_ARRAY_BUFFER, this->VBO);
    glBufferData(GL_ARRAY_BUFFER, this->size, this->data, GL_DYNAMIC_DRAW);
    checkCudaErrors(cudaGraphicsGLRegisterBuffer(&this->cuda_vbo_resource, this->VBO, cudaGraphicsMapFlagsNone));
    glBindBuffer(GL_ARRAY_BUFFER, 0);
}

void Line::add(int f, int t) {
    this->count++;
    this->d2h();
    // but from here on I need to work with this->ft, not with this->data
    this->h2d();
}

void Line::bindVBO() {
    checkCudaErrors(cudaGraphicsMapResources(1, &this->cuda_vbo_resource, 0));
    size_t num_bytes;
    checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&this->dptr, &num_bytes, this->cuda_vbo_resource));
}

void Line::unbindVBO() {
    checkCudaErrors(cudaGraphicsUnmapResources(1, &this->cuda_vbo_resource, 0));
}

void Point::d2h() {
    this->bindVBO();
    checkCudaErrors(cudaMemcpy((void *)this->data, this->dptr, this->size(), cudaMemcpyDeviceToHost));
    this->unbindVBO();
}

void Point::h2d() {
    this->bindVBO();
    checkCudaErrors(cudaMemcpy(this->dptr, (void *)this->data, this->size(), cudaMemcpyHostToDevice));
    this->unbindVBO();
}

void Line::draw() {
    glEnable(GL_LINE_SMOOTH);
    glUseProgram(this->lineShaderProgram);
    glLineWidth(2.0);
    //glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, LineStructSize, (GLvoid *)0);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (GLvoid *)0);
    //glVertexPointer(3, GL_FLOAT, LineStructSize, 0);
    glVertexPointer(3, GL_FLOAT, 0, 0);
    //glInterleavedArrays(GL_V2F, 0, NULL);
    glDrawArrays(GL_LINES, 0, this->count * 2);
    glDisable(GL_LINE_SMOOTH);
}

void Line::tick(Point* p1) {
    int THREADS_PER_BLOCK = 1024;
    // Round the grid size up so every line is covered; the original
    // `count % THREADS_PER_BLOCK` computation divides by zero whenever
    // count is a multiple of the block size.
    int threads = THREADS_PER_BLOCK;
    int blocks = (this->count + threads - 1) / threads;
    moveLines<<<blocks, threads>>>(this->dptr, this->ftdptr, p1->dptr, this->count);
}
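For reference, a tiny standalone sketch of the rounded-up launch-size computation used in Line::tick above; the sample counts are arbitrary.

#include <cstdio>

int main() {
    const int THREADS_PER_BLOCK = 1024;
    int counts[] = {1, 512, 1024, 1500, 4096};
    for (int count : counts) {
        int blocks = (count + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;  // ceil(count / threads)
        std::printf("count=%d -> blocks=%d of %d threads\n", count, blocks, THREADS_PER_BLOCK);
    }
    return 0;
}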
739216b3d9cc2e3a8943dd82ef5dc52d4af4ecf4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // pass a single pair to kernel and it will return the count of commonn elements /************************************************************************ Author - Aman Maldar Simple code - parallel version of data association. Static value of minSupport=1. This will show all the pairs generated. File = 6entries.txt Limitation - Generates only till set of 4 pairs as of now. It needs multiple changes for the data structure as well. Need to reconfigure it. Data: (6entries.txt) 2 3 4 1 2 3 4 5 4 5 6 7 0 2 1 2 3 4 2 3 5 7 8 *************************************************************************/ #include "apriori.hcu" #include "functions.hcu" //double minSupp = 0.001; // 0.001; /* __shared__ int smem[128]; __global__ void addition_scan_kernel (int *A_device, int *B_device , int *ans_device) { int tid = threadIdx.x; __syncthreads(); int index1=0; int sum = 0; int begin = B_device[tid]; while (tid < 9){ printf("tid: %d begin: %d A_device[begin]: %d \n", tid, begin,A_device[begin]); while (A_device[begin+index1] != -1){ // map data from A_device to smem // limitation on smem comes in picture smem[begin+index1] = A_device[begin+index1]; __syncthreads(); index1++; } printf("index1: %d \n", index1); for (int i=begin;i<begin+index1;i++){ sum += smem[i]; __syncthreads(); } ans_device[tid] = sum; tid+=9; } } // end kernel function */ __global__ void find2_common_kernel (int *A_device, int *B_device , int *p, int *q, int *common_device) { int tid = threadIdx.x; //__syncthreads(); while (tid < 1) { // p =3 , q = 5 //int len_p = 4; // B_device[p+1] - B_device[p] - 1; // = 16-11 -1 = 4 1,2,5,6 //int len_q = 3; // B_device[q+1] - B_device[q] - 1; // = 25-21 -1 = 3 2,3,6 int len_p = B_device[*p+1] - B_device[*p] - 1; // = 16-11 -1 = 4 1,2,5,6 int len_q = B_device[*q+1] - B_device[*q] - 1; // = 25-21 -1 = 3 2,3,6 *common_device = 0; //int p_offset = 11; //int q_offset = 21; int p_offset = B_device[*p]; int q_offset = B_device[*q]; for (int i = 0; i < len_p; i++) { //int x = A_device[B_device[p]+i]; //xtmp += i; int x = A_device[p_offset+i]; int y = 0; for (int j = 0; j < len_q; j++) { //y = A_device[B_device[q]+j]; //ytmp += j; y = A_device[q_offset+j]; //printf("tid: %d x: %d y: %d\n", tid, x, y ); if (x == y) { //cout << " " << num1[i]; //i++; //j++; printf("tid: %d x: %d y: %d\n", tid, x, y ); *common_device +=1; } } // end inner for } // end outer for //*common_device = 10; tid++; } // end while } // end kernel function void Execute(int argc){ parse_database(argc); vector <int> A; //= globalDataset // convert itemId_TidMapping into long array vector <int> B ; // = globalDatasetThreadIndex; int *A_cpu = (int *) malloc (sizeof(int)* totalItems); int *B_cpu = (int *) malloc (sizeof(int)* (maxItemID+1)); //index array lenght same as number of items int *ans_cpu = (int *) malloc (sizeof(int)* (maxItemID+1)); int k =0; // global pointer for globalMap for(int i=0;i<=maxItemID;i++){ //B.push_back(k); B_cpu[i] = k; vector <int> tmp11 = itemId_TidMapping[i]; // copy entire vector for(int j=1;j<tmp11.size();j++){ // last item should be inclusive, first element is excluded // A.push_back(tmp11[j]); A_cpu[k] = tmp11[j]; k++; } //A.push_back(-1); // seperate mappings by -1 A_cpu[k] = -1; k++; } /* cout << " Printing itemId_TidMapping as array: " << endl; for(int i =0;i<A.size();i++){ //A_cpu[i] = A[i]; cout << A[i] << " " ; }cout << endl;*/ cout << " Printing itemId_TidMapping copy A_cpu: " << totalItems << endl; for(int i 
=0;i<totalItems;i++){ cout << A_cpu[i] << " " ; }cout << endl; /* cout << " Printing starting indexes " << endl; for(int i =0;i<B.size();i++){ cout << B[i] << " " ; }cout << endl;*/ cout << " Printing starting indexes B_cpu: " << endl; for(int i =0;i<= maxItemID;i++){ cout << B_cpu[i] << " " ; }cout << endl; //return; //vector <int> A; //= globalDataset // convert itemId_TidMapping into long array //vector <int> B ; // = globalDatasetThreadIndex; //int *A_cpu = globalDataset //int *B_cpu = globalDatasetThreadIndex int *A_device; //device storage pointers int *B_device; int *ans_device; hipMalloc ((void **) &A_device, sizeof (int) * totalItems); hipMalloc ((void **) &B_device, sizeof (int) * 9); hipMalloc ((void **) &ans_device, sizeof (int) * 9); /* hipMemcpy (A_device, A_cpu, sizeof (int) * totalItems, hipMemcpyHostToDevice); hipMemcpy (B_device, B_cpu, sizeof (int) * 9, hipMemcpyHostToDevice); hipMemcpy (ans_device, ans_cpu, sizeof (int) * 9, hipMemcpyHostToDevice); int numberOfBlocks = 1; int threadsInBlock = 100; addition_scan_kernel <<< numberOfBlocks,threadsInBlock >>> (A_device, B_device, ans_device); hipMemcpy (ans_cpu, ans_device, sizeof (int) * 9, hipMemcpyDeviceToHost); */ int *p_cpu = (int *) malloc (sizeof(int)); int *q_cpu = (int *) malloc (sizeof(int)); int *common_cpu = (int *) malloc (sizeof(int)); *p_cpu = 2; *q_cpu = 3; *common_cpu = 0; int *p_device; int *q_device; int *common_device; hipMalloc ((void **) &p_device, sizeof (int)); hipMalloc ((void **) &q_device, sizeof (int)); hipMalloc ((void **) &common_device, sizeof (int)); hipMemcpy (A_device, A_cpu, sizeof (int) * totalItems, hipMemcpyHostToDevice); hipMemcpy (B_device, B_cpu, sizeof (int) * 9, hipMemcpyHostToDevice); //hipMemcpy (ans_device, ans_cpu, sizeof (int) * 9, hipMemcpyHostToDevice); hipMemcpy (p_device, p_cpu, sizeof (int) * 1, hipMemcpyHostToDevice); hipMemcpy (q_device, q_cpu, sizeof (int) * 1, hipMemcpyHostToDevice); hipMemcpy (common_device, common_cpu, sizeof (int) * 1, hipMemcpyHostToDevice); int numberOfBlocks = 1; int threadsInBlock = 2; hipLaunchKernelGGL(( find2_common_kernel) , dim3(numberOfBlocks),dim3(threadsInBlock) , 0, 0, A_device, B_device, p_device, q_device, common_device); hipMemcpy (common_cpu, common_device, sizeof (int), hipMemcpyDeviceToHost); cout << "total common elements are: " << *common_cpu << endl; return; } // end Execute int main(int argc, char **argv){ auto start = chrono::high_resolution_clock::now(); Execute(argc); auto end = chrono::high_resolution_clock::now(); chrono::duration<double> el = end - start; cout<<"Execution time is: " << el.count() * 1000 << " mS " << endl; return 0; } /* L1.push_back(0); // initialized first index with 0 as we are not using it. //minSupport = round(minSupp * TID_Transactions); minSupport = 1; // Following code generates single items which have support greater than min_sup // compare the occurrence of the object against minSupport cout << "\n Support:" << minSupport << endl << "\n"; //Generate L1 - filtered single items ? I think this should be C1, not L1. for (int i=0; i<= maxItemID; i++) { if(itemIDcount[i] >= minSupport){ L1.push_back(i); //push TID into frequentItem one_freq_itemset++; //cout << "1 Frequent Item is: (" << i << ") Freq is: " << itemIDcount[i] << endl; } } //cout << "one_freq_itemset: " << one_freq_itemset << endl << "\n"; //****************************************************************************************************************** //Generate L2 . 
Make a pair of frequent items in L1 for (int i=0;i <= L1.size() -1 -1; i++) //-1 is done for eliminating first entry { for (int j=i+1;j <= L1.size() -1; j++){ twoStruct.a = L1[i]; twoStruct.b = L1[j]; L2.push_back(twoStruct); //cout << "2 Items are: (" <<L1[i]<< "," << L1[j] << ") " << endl; } } //****************************************************************************************************************** //Generate C2. Prune L2 . Compare against min_support and remove less frequent items. */ //---------------------------------------------------------------------------------
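/* Execute() above flattens itemId_TidMapping into A_cpu (each item's TID list
   followed by a -1 sentinel) and B_cpu (the offset where each item's list
   starts), and find2_common_kernel recovers a list length as B[p+1] - B[p] - 1.
   A minimal host-only sketch of building that layout from plain TID lists;
   buildFlattened is an illustrative name, the sample lists are hand-derived
   from the 6entries.txt data in the header comment, and the extra trailing
   offset (so the length formula also covers the last item) is an addition of
   this sketch, not of the original code: */
#include <cstdio>
#include <vector>
using std::vector;

static void buildFlattened(const vector<vector<int> >& tidLists,
                           vector<int>& values, vector<int>& offsets)
{
    values.clear();
    offsets.clear();
    for (size_t i = 0; i < tidLists.size(); ++i) {
        offsets.push_back((int)values.size());                 // start of this item's TIDs
        values.insert(values.end(), tidLists[i].begin(), tidLists[i].end());
        values.push_back(-1);                                   // sentinel, as in A_cpu above
    }
    offsets.push_back((int)values.size());                      // extra end offset (sketch only)
}

int main()
{
    // Illustrative TID lists for items 0..3 (transactions numbered from 1).
    vector<vector<int> > tidLists;
    tidLists.push_back(vector<int>{4});
    tidLists.push_back(vector<int>{2, 5});
    tidLists.push_back(vector<int>{1, 2, 4, 5, 6});
    tidLists.push_back(vector<int>{1, 2, 5, 6});

    vector<int> values, offsets;
    buildFlattened(tidLists, values, offsets);

    for (size_t i = 0; i + 1 < offsets.size(); ++i) {
        int len = offsets[i + 1] - offsets[i] - 1;              // same formula as len_p / len_q
        printf("item %zu: %d TIDs starting at offset %d\n", i, len, offsets[i]);
    }
    return 0;
}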
739216b3d9cc2e3a8943dd82ef5dc52d4af4ecf4.cu
// pass a single pair to kernel and it will return the count of commonn elements /************************************************************************ Author - Aman Maldar Simple code - parallel version of data association. Static value of minSupport=1. This will show all the pairs generated. File = 6entries.txt Limitation - Generates only till set of 4 pairs as of now. It needs multiple changes for the data structure as well. Need to reconfigure it. Data: (6entries.txt) 2 3 4 1 2 3 4 5 4 5 6 7 0 2 1 2 3 4 2 3 5 7 8 *************************************************************************/ #include "apriori.hcu" #include "functions.hcu" //double minSupp = 0.001; // 0.001; /* __shared__ int smem[128]; __global__ void addition_scan_kernel (int *A_device, int *B_device , int *ans_device) { int tid = threadIdx.x; __syncthreads(); int index1=0; int sum = 0; int begin = B_device[tid]; while (tid < 9){ printf("tid: %d begin: %d A_device[begin]: %d \n", tid, begin,A_device[begin]); while (A_device[begin+index1] != -1){ // map data from A_device to smem // limitation on smem comes in picture smem[begin+index1] = A_device[begin+index1]; __syncthreads(); index1++; } printf("index1: %d \n", index1); for (int i=begin;i<begin+index1;i++){ sum += smem[i]; __syncthreads(); } ans_device[tid] = sum; tid+=9; } } // end kernel function */ __global__ void find2_common_kernel (int *A_device, int *B_device , int *p, int *q, int *common_device) { int tid = threadIdx.x; //__syncthreads(); while (tid < 1) { // p =3 , q = 5 //int len_p = 4; // B_device[p+1] - B_device[p] - 1; // = 16-11 -1 = 4 1,2,5,6 //int len_q = 3; // B_device[q+1] - B_device[q] - 1; // = 25-21 -1 = 3 2,3,6 int len_p = B_device[*p+1] - B_device[*p] - 1; // = 16-11 -1 = 4 1,2,5,6 int len_q = B_device[*q+1] - B_device[*q] - 1; // = 25-21 -1 = 3 2,3,6 *common_device = 0; //int p_offset = 11; //int q_offset = 21; int p_offset = B_device[*p]; int q_offset = B_device[*q]; for (int i = 0; i < len_p; i++) { //int x = A_device[B_device[p]+i]; //xtmp += i; int x = A_device[p_offset+i]; int y = 0; for (int j = 0; j < len_q; j++) { //y = A_device[B_device[q]+j]; //ytmp += j; y = A_device[q_offset+j]; //printf("tid: %d x: %d y: %d\n", tid, x, y ); if (x == y) { //cout << " " << num1[i]; //i++; //j++; printf("tid: %d x: %d y: %d\n", tid, x, y ); *common_device +=1; } } // end inner for } // end outer for //*common_device = 10; tid++; } // end while } // end kernel function void Execute(int argc){ parse_database(argc); vector <int> A; //= globalDataset // convert itemId_TidMapping into long array vector <int> B ; // = globalDatasetThreadIndex; int *A_cpu = (int *) malloc (sizeof(int)* totalItems); int *B_cpu = (int *) malloc (sizeof(int)* (maxItemID+1)); //index array lenght same as number of items int *ans_cpu = (int *) malloc (sizeof(int)* (maxItemID+1)); int k =0; // global pointer for globalMap for(int i=0;i<=maxItemID;i++){ //B.push_back(k); B_cpu[i] = k; vector <int> tmp11 = itemId_TidMapping[i]; // copy entire vector for(int j=1;j<tmp11.size();j++){ // last item should be inclusive, first element is excluded // A.push_back(tmp11[j]); A_cpu[k] = tmp11[j]; k++; } //A.push_back(-1); // seperate mappings by -1 A_cpu[k] = -1; k++; } /* cout << " Printing itemId_TidMapping as array: " << endl; for(int i =0;i<A.size();i++){ //A_cpu[i] = A[i]; cout << A[i] << " " ; }cout << endl;*/ cout << " Printing itemId_TidMapping copy A_cpu: " << totalItems << endl; for(int i =0;i<totalItems;i++){ cout << A_cpu[i] << " " ; }cout << endl; /* cout << " Printing starting 
indexes " << endl; for(int i =0;i<B.size();i++){ cout << B[i] << " " ; }cout << endl;*/ cout << " Printing starting indexes B_cpu: " << endl; for(int i =0;i<= maxItemID;i++){ cout << B_cpu[i] << " " ; }cout << endl; //return; //vector <int> A; //= globalDataset // convert itemId_TidMapping into long array //vector <int> B ; // = globalDatasetThreadIndex; //int *A_cpu = globalDataset //int *B_cpu = globalDatasetThreadIndex int *A_device; //device storage pointers int *B_device; int *ans_device; cudaMalloc ((void **) &A_device, sizeof (int) * totalItems); cudaMalloc ((void **) &B_device, sizeof (int) * 9); cudaMalloc ((void **) &ans_device, sizeof (int) * 9); /* cudaMemcpy (A_device, A_cpu, sizeof (int) * totalItems, cudaMemcpyHostToDevice); cudaMemcpy (B_device, B_cpu, sizeof (int) * 9, cudaMemcpyHostToDevice); cudaMemcpy (ans_device, ans_cpu, sizeof (int) * 9, cudaMemcpyHostToDevice); int numberOfBlocks = 1; int threadsInBlock = 100; addition_scan_kernel <<< numberOfBlocks,threadsInBlock >>> (A_device, B_device, ans_device); cudaMemcpy (ans_cpu, ans_device, sizeof (int) * 9, cudaMemcpyDeviceToHost); */ int *p_cpu = (int *) malloc (sizeof(int)); int *q_cpu = (int *) malloc (sizeof(int)); int *common_cpu = (int *) malloc (sizeof(int)); *p_cpu = 2; *q_cpu = 3; *common_cpu = 0; int *p_device; int *q_device; int *common_device; cudaMalloc ((void **) &p_device, sizeof (int)); cudaMalloc ((void **) &q_device, sizeof (int)); cudaMalloc ((void **) &common_device, sizeof (int)); cudaMemcpy (A_device, A_cpu, sizeof (int) * totalItems, cudaMemcpyHostToDevice); cudaMemcpy (B_device, B_cpu, sizeof (int) * 9, cudaMemcpyHostToDevice); //cudaMemcpy (ans_device, ans_cpu, sizeof (int) * 9, cudaMemcpyHostToDevice); cudaMemcpy (p_device, p_cpu, sizeof (int) * 1, cudaMemcpyHostToDevice); cudaMemcpy (q_device, q_cpu, sizeof (int) * 1, cudaMemcpyHostToDevice); cudaMemcpy (common_device, common_cpu, sizeof (int) * 1, cudaMemcpyHostToDevice); int numberOfBlocks = 1; int threadsInBlock = 2; find2_common_kernel <<< numberOfBlocks,threadsInBlock >>> (A_device, B_device, p_device, q_device, common_device); cudaMemcpy (common_cpu, common_device, sizeof (int), cudaMemcpyDeviceToHost); cout << "total common elements are: " << *common_cpu << endl; return; } // end Execute int main(int argc, char **argv){ auto start = chrono::high_resolution_clock::now(); Execute(argc); auto end = chrono::high_resolution_clock::now(); chrono::duration<double> el = end - start; cout<<"Execution time is: " << el.count() * 1000 << " mS " << endl; return 0; } /* L1.push_back(0); // initialized first index with 0 as we are not using it. //minSupport = round(minSupp * TID_Transactions); minSupport = 1; // Following code generates single items which have support greater than min_sup // compare the occurrence of the object against minSupport cout << "\n Support:" << minSupport << endl << "\n"; //Generate L1 - filtered single items ? I think this should be C1, not L1. for (int i=0; i<= maxItemID; i++) { if(itemIDcount[i] >= minSupport){ L1.push_back(i); //push TID into frequentItem one_freq_itemset++; //cout << "1 Frequent Item is: (" << i << ") Freq is: " << itemIDcount[i] << endl; } } //cout << "one_freq_itemset: " << one_freq_itemset << endl << "\n"; //****************************************************************************************************************** //Generate L2 . 
Make a pair of frequent items in L1 for (int i=0;i <= L1.size() -1 -1; i++) //-1 is done for eliminating first entry { for (int j=i+1;j <= L1.size() -1; j++){ twoStruct.a = L1[i]; twoStruct.b = L1[j]; L2.push_back(twoStruct); //cout << "2 Items are: (" <<L1[i]<< "," << L1[j] << ") " << endl; } } //****************************************************************************************************************** //Generate C2. Prune L2 . Compare against min_support and remove less frequent items. */ //---------------------------------------------------------------------------------
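/* A self-contained sketch of the same single-pair intersection count using the
   plain CUDA API, with every call checked (the .hip twin above is identical
   apart from the hip* names). countCommon and CUDA_CHECK are illustrative
   names, not part of apriori.hcu; the two lists mirror the 1,2,5,6 / 2,3,6
   example in the kernel comments. */
#include <cstdio>
#include <cstdlib>

#define CUDA_CHECK(call)                                                  \
    do {                                                                  \
        cudaError_t err_ = (call);                                        \
        if (err_ != cudaSuccess) {                                        \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,            \
                    cudaGetErrorString(err_));                            \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)

__global__ void countCommon(const int *a, int lenA,
                            const int *b, int lenB, int *count)
{
    if (threadIdx.x != 0 || blockIdx.x != 0) return;   // single worker thread
    int c = 0;
    for (int i = 0; i < lenA; ++i)
        for (int j = 0; j < lenB; ++j)
            if (a[i] == b[j]) ++c;
    *count = c;
}

int main()
{
    const int hA[] = {1, 2, 5, 6};   // TID list of one item
    const int hB[] = {2, 3, 6};      // TID list of another item
    int *dA, *dB, *dCount, hCount = 0;

    CUDA_CHECK(cudaMalloc(&dA, sizeof(hA)));
    CUDA_CHECK(cudaMalloc(&dB, sizeof(hB)));
    CUDA_CHECK(cudaMalloc(&dCount, sizeof(int)));
    CUDA_CHECK(cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice));

    countCommon<<<1, 1>>>(dA, 4, dB, 3, dCount);
    CUDA_CHECK(cudaGetLastError());                 // catch launch errors
    CUDA_CHECK(cudaMemcpy(&hCount, dCount, sizeof(int), cudaMemcpyDeviceToHost));

    printf("total common elements are: %d\n", hCount);   // expected: 2 (values 2 and 6)

    CUDA_CHECK(cudaFree(dA));
    CUDA_CHECK(cudaFree(dB));
    CUDA_CHECK(cudaFree(dCount));
    return 0;
}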
13d45d44af8fe03608dfe39e142f578f042249da.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ============================================================================ Name : math_kernels.cu Author : Shuyang Sun Version : Copyright : Shuyang Sun, all rights reserved. ============================================================================ */ #include "math_kernels.hpp" __global__ void FPO_1() { float const res{1.0f * 1.5f}; } __global__ void FPO_2() { float const res{1.0f * 1.5f + 2.0f}; } __global__ void FPO_3() { float const res{1.0f * 1.5f + 2.0f * 8.0f}; } __global__ void FPO_4() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f}; } __global__ void FPO_5() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f}; } __global__ void FPO_6() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f}; } __global__ void FPO_7() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f}; } __global__ void FPO_8() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f}; } __global__ void FPO_9() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f - 2.4f}; } __global__ void FPO_10() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f - 2.4f + 2.3f}; } __global__ void FPO_11() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f - 2.4f + 2.3f * 8.9f}; } __global__ void FPO_12() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f - 2.4f + 2.3f * 8.9f - 3.5f}; } __global__ void FPO_13() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f - 2.4f + 2.3f * 8.9f - 3.5f + 11.6f}; } __global__ void FPO_14() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f - 2.4f + 2.3f * 8.9f - 3.5f + 11.6f - 7.8f}; } __global__ void FPO_15() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f - 2.4f + 2.3f * 8.9f - 3.5f + 11.6f - 7.8f * 3.0f}; } __global__ void FPO_16() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f - 2.4f + 2.3f * 8.9f - 3.5f + 11.6f - 7.8f * 3.0f - 2.0f}; } __global__ void SFU_1() { float const res{__sinf(2.0f)}; } __global__ void SFU_2() { float const res{__sinf(__cosf(2.0f))}; } __global__ void SFU_3() { float const res{__sinf(__cosf(__sinf(2.0f)))}; } __global__ void SFU_4() { float const res{__sinf(__cosf(__sinf(__cosf(2.0f))))}; } __global__ void SFU_5() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(2.0f)))))}; } __global__ void SFU_6() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(2.0f))))))}; } __global__ void SFU_7() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(2.0f)))))))}; } __global__ void SFU_8() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(2.0f))))))))}; } __global__ void SFU_9() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(2.0f)))))))))}; } __global__ void SFU_10() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(2.0f))))))))))}; } __global__ void SFU_11() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(2.0f)))))))))))}; } __global__ void SFU_12() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(2.0f))))))))))))}; } __global__ void SFU_13() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(2.0f)))))))))))))}; } 
__global__ void SFU_14() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(2.0f))))))))))))))}; } __global__ void SFU_15() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(2.0f)))))))))))))))}; } __global__ void SFU_16() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(2.0f))))))))))))))))}; } __device__ float TrigRes(float const x) { return ((0.75f * x * x - 4.71239f * x + 5.9022f) * __cosf(x) + (-0.0833333f * x * x + 0.523599f * x - 0.803949f) * __cosf(3.0f * x) + 4.5f * x - 1.5f * x * __sinf(x) + 0.0555556f * x * __sinf(3.0f * x) + 6.96239f * __sinf(x) + 0.0754671f * __sinf(3.0f * x))/(9.0f * 3.141592653f); } __device__ float PolyNormalRes(float const x) { return 0.20019404249547249f - 0.01066466223648254f * x + 0.027284743817578543f * x * x + 0.006805423711959009f * x * x * x - 0.00110029250856299f * x * x * x * x; } __device__ float PolyNormalCachedRes(float const x) { float const x2{x * x}; float const x3{x2 * x}; float const x4{x3 * x}; return 0.20019404249547249f - 0.01066466223648254f * x + 0.027284743817578543f * x2 + 0.006805423711959009f * x3 - 0.00110029250856299f * x4; } __device__ float PolyNestedRes(float const x) { return 0.20019404249547249f - 0.01066466223648254f * x * (1.0f - 2.558425500269543f * x * (1.0f + 0.24942230564666426f * x * (1.0f - 0.1616787661037875f * x))); } __device__ float PolyRootsRes(float const x) { return -0.011f * (x - 9.0517f) * (x + 3.8958f) * (x * (x - 0.5146f) + 5.1595f); } __global__ void TrigFunc_2(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{TrigRes(x1)}; const float res2{TrigRes(x2)}; data_out[idx] = res2 - res1; } } __global__ void PolyNormalFunc_2(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNormalRes(x1)}; const float res2{PolyNormalRes(x2)}; data_out[idx] = res2 - res1; } } __global__ void PolyNormalCachedFunc_2(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNormalCachedRes(x1)}; const float res2{PolyNormalCachedRes(x2)}; data_out[idx] = res2 - res1; } } __global__ void PolyNestedFunc_2(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNestedRes(x1)}; const float res2{PolyNestedRes(x2)}; data_out[idx] = res2 - res1; } } __global__ void PolyRootsFunc_2(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyRootsRes(x1)}; const float 
res2{PolyRootsRes(x2)}; data_out[idx] = res2 - res1; } } __global__ void TrigFunc_4(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{TrigRes(x1)}; const float res2{TrigRes(x2)}; const float res3{TrigRes(x1 + 1.0f)}; const float res4{TrigRes(x2 + 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3; } } __global__ void PolyNormalFunc_4(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNormalRes(x1)}; const float res2{PolyNormalRes(x2)}; const float res3{PolyNormalRes(x1 + 1.0f)}; const float res4{PolyNormalRes(x2 + 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3; } } __global__ void PolyNormalCachedFunc_4(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNormalCachedRes(x1)}; const float res2{PolyNormalCachedRes(x2)}; const float res3{PolyNormalCachedRes(x1 + 1.0f)}; const float res4{PolyNormalCachedRes(x2 + 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3; } } __global__ void PolyNestedFunc_4(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNestedRes(x1)}; const float res2{PolyNestedRes(x2)}; const float res3{PolyNestedRes(x1 + 1.0f)}; const float res4{PolyNestedRes(x2 + 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3; } } __global__ void PolyRootsFunc_4(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyRootsRes(x1)}; const float res2{PolyRootsRes(x2)}; const float res3{PolyRootsRes(x1 + 1.0f)}; const float res4{PolyRootsRes(x2 + 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3; } } __global__ void TrigFunc_6(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{TrigRes(x1)}; const float res2{TrigRes(x2)}; const float res3{TrigRes(x1 + 1.0f)}; const float res4{TrigRes(x2 + 1.0f)}; const float res5{TrigRes(x1 - 1.0f)}; const float res6{TrigRes(x2 - 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5; } } __global__ void PolyNormalFunc_6(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNormalRes(x1)}; const float res2{PolyNormalRes(x2)}; const float res3{PolyNormalRes(x1 + 1.0f)}; const float res4{PolyNormalRes(x2 + 1.0f)}; const float res5{PolyNormalRes(x1 - 1.0f)}; const float 
res6{PolyNormalRes(x2 - 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5; } } __global__ void PolyNormalCachedFunc_6(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNormalCachedRes(x1)}; const float res2{PolyNormalCachedRes(x2)}; const float res3{PolyNormalCachedRes(x1 + 1.0f)}; const float res4{PolyNormalCachedRes(x2 + 1.0f)}; const float res5{PolyNormalCachedRes(x1 - 1.0f)}; const float res6{PolyNormalCachedRes(x2 - 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5; } } __global__ void PolyNestedFunc_6(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNestedRes(x1)}; const float res2{PolyNestedRes(x2)}; const float res3{PolyNestedRes(x1 + 1.0f)}; const float res4{PolyNestedRes(x2 + 1.0f)}; const float res5{PolyNestedRes(x1 - 1.0f)}; const float res6{PolyNestedRes(x2 - 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5; } } __global__ void PolyRootsFunc_6(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyRootsRes(x1)}; const float res2{PolyRootsRes(x2)}; const float res3{PolyRootsRes(x1 + 1.0f)}; const float res4{PolyRootsRes(x2 + 1.0f)}; const float res5{PolyRootsRes(x1 - 1.0f)}; const float res6{PolyRootsRes(x2 - 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5; } } __global__ void TrigFunc_8(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{TrigRes(x1)}; const float res2{TrigRes(x2)}; const float res3{TrigRes(x1 + 1.0f)}; const float res4{TrigRes(x2 + 1.0f)}; const float res5{TrigRes(x1 - 1.0f)}; const float res6{TrigRes(x2 - 1.0f)}; const float res7{TrigRes(x1 + 2.0f)}; const float res8{TrigRes(x2 + 2.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5 + res8 - res7; } } __global__ void PolyNormalFunc_8(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNormalRes(x1)}; const float res2{PolyNormalRes(x2)}; const float res3{PolyNormalRes(x1 + 1.0f)}; const float res4{PolyNormalRes(x2 + 1.0f)}; const float res5{PolyNormalRes(x1 - 1.0f)}; const float res6{PolyNormalRes(x2 - 1.0f)}; const float res7{PolyNormalRes(x1 + 2.0f)}; const float res8{PolyNormalRes(x2 + 2.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5 + res8 - res7; } } __global__ void PolyNormalCachedFunc_8(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float 
res1{PolyNormalCachedRes(x1)}; const float res2{PolyNormalCachedRes(x2)}; const float res3{PolyNormalCachedRes(x1 + 1.0f)}; const float res4{PolyNormalCachedRes(x2 + 1.0f)}; const float res5{PolyNormalCachedRes(x1 - 1.0f)}; const float res6{PolyNormalCachedRes(x2 - 1.0f)}; const float res7{PolyNormalCachedRes(x1 + 2.0f)}; const float res8{PolyNormalCachedRes(x2 + 2.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5 + res8 - res7; } } __global__ void PolyNestedFunc_8(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNestedRes(x1)}; const float res2{PolyNestedRes(x2)}; const float res3{PolyNestedRes(x1 + 1.0f)}; const float res4{PolyNestedRes(x2 + 1.0f)}; const float res5{PolyNestedRes(x1 - 1.0f)}; const float res6{PolyNestedRes(x2 - 1.0f)}; const float res7{PolyNestedRes(x1 + 2.0f)}; const float res8{PolyNestedRes(x2 + 2.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5 + res8 - res7; } } __global__ void PolyRootsFunc_8(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyRootsRes(x1)}; const float res2{PolyRootsRes(x2)}; const float res3{PolyRootsRes(x1 + 1.0f)}; const float res4{PolyRootsRes(x2 + 1.0f)}; const float res5{PolyRootsRes(x1 - 1.0f)}; const float res6{PolyRootsRes(x2 - 1.0f)}; const float res7{PolyRootsRes(x1 + 2.0f)}; const float res8{PolyRootsRes(x2 + 2.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5 + res8 - res7; } }
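/* PolyNormalRes, PolyNormalCachedRes and PolyNestedRes above appear to be the
   same quartic written in expanded, cached-power and nested form (multiplying
   out the nested coefficients reproduces the expanded ones to within rounding).
   A small host-only check of that equivalence; the helper names below are
   illustrative and the constants are copied from the device functions above: */
#include <math.h>
#include <stdio.h>

static float polyExpanded(float x)
{
    return 0.20019404249547249f - 0.01066466223648254f * x
         + 0.027284743817578543f * x * x
         + 0.006805423711959009f * x * x * x
         - 0.00110029250856299f * x * x * x * x;
}

static float polyNested(float x)
{
    return 0.20019404249547249f
         - 0.01066466223648254f * x * (1.0f - 2.558425500269543f * x
              * (1.0f + 0.24942230564666426f * x
                   * (1.0f - 0.1616787661037875f * x)));
}

int main()
{
    float maxDiff = 0.0f;
    for (float x = -4.0f; x <= 4.0f; x += 0.25f) {
        float d = fabsf(polyExpanded(x) - polyNested(x));
        if (d > maxDiff) maxDiff = d;
    }
    // Expected to be tiny: only float rounding separates the two forms.
    printf("max |expanded - nested| on [-4,4]: %g\n", maxDiff);
    return 0;
}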
13d45d44af8fe03608dfe39e142f578f042249da.cu
/* ============================================================================ Name : math_kernels.cu Author : Shuyang Sun Version : Copyright : Shuyang Sun, all rights reserved. ============================================================================ */ #include "math_kernels.hpp" __global__ void FPO_1() { float const res{1.0f * 1.5f}; } __global__ void FPO_2() { float const res{1.0f * 1.5f + 2.0f}; } __global__ void FPO_3() { float const res{1.0f * 1.5f + 2.0f * 8.0f}; } __global__ void FPO_4() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f}; } __global__ void FPO_5() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f}; } __global__ void FPO_6() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f}; } __global__ void FPO_7() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f}; } __global__ void FPO_8() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f}; } __global__ void FPO_9() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f - 2.4f}; } __global__ void FPO_10() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f - 2.4f + 2.3f}; } __global__ void FPO_11() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f - 2.4f + 2.3f * 8.9f}; } __global__ void FPO_12() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f - 2.4f + 2.3f * 8.9f - 3.5f}; } __global__ void FPO_13() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f - 2.4f + 2.3f * 8.9f - 3.5f + 11.6f}; } __global__ void FPO_14() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f - 2.4f + 2.3f * 8.9f - 3.5f + 11.6f - 7.8f}; } __global__ void FPO_15() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f - 2.4f + 2.3f * 8.9f - 3.5f + 11.6f - 7.8f * 3.0f}; } __global__ void FPO_16() { float const res{1.0f * 1.5f + 2.0f * 8.0f - 9.6f * 2.0f + 3.0f - 2.5f * 0.5f - 2.4f + 2.3f * 8.9f - 3.5f + 11.6f - 7.8f * 3.0f - 2.0f}; } __global__ void SFU_1() { float const res{__sinf(2.0f)}; } __global__ void SFU_2() { float const res{__sinf(__cosf(2.0f))}; } __global__ void SFU_3() { float const res{__sinf(__cosf(__sinf(2.0f)))}; } __global__ void SFU_4() { float const res{__sinf(__cosf(__sinf(__cosf(2.0f))))}; } __global__ void SFU_5() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(2.0f)))))}; } __global__ void SFU_6() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(2.0f))))))}; } __global__ void SFU_7() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(2.0f)))))))}; } __global__ void SFU_8() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(2.0f))))))))}; } __global__ void SFU_9() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(2.0f)))))))))}; } __global__ void SFU_10() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(2.0f))))))))))}; } __global__ void SFU_11() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(2.0f)))))))))))}; } __global__ void SFU_12() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(2.0f))))))))))))}; } __global__ void SFU_13() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(2.0f)))))))))))))}; } __global__ void SFU_14() { float const 
res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(2.0f))))))))))))))}; } __global__ void SFU_15() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(2.0f)))))))))))))))}; } __global__ void SFU_16() { float const res{__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(__sinf(__cosf(2.0f))))))))))))))))}; } __device__ float TrigRes(float const x) { return ((0.75f * x * x - 4.71239f * x + 5.9022f) * __cosf(x) + (-0.0833333f * x * x + 0.523599f * x - 0.803949f) * __cosf(3.0f * x) + 4.5f * x - 1.5f * x * __sinf(x) + 0.0555556f * x * __sinf(3.0f * x) + 6.96239f * __sinf(x) + 0.0754671f * __sinf(3.0f * x))/(9.0f * 3.141592653f); } __device__ float PolyNormalRes(float const x) { return 0.20019404249547249f - 0.01066466223648254f * x + 0.027284743817578543f * x * x + 0.006805423711959009f * x * x * x - 0.00110029250856299f * x * x * x * x; } __device__ float PolyNormalCachedRes(float const x) { float const x2{x * x}; float const x3{x2 * x}; float const x4{x3 * x}; return 0.20019404249547249f - 0.01066466223648254f * x + 0.027284743817578543f * x2 + 0.006805423711959009f * x3 - 0.00110029250856299f * x4; } __device__ float PolyNestedRes(float const x) { return 0.20019404249547249f - 0.01066466223648254f * x * (1.0f - 2.558425500269543f * x * (1.0f + 0.24942230564666426f * x * (1.0f - 0.1616787661037875f * x))); } __device__ float PolyRootsRes(float const x) { return -0.011f * (x - 9.0517f) * (x + 3.8958f) * (x * (x - 0.5146f) + 5.1595f); } __global__ void TrigFunc_2(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{TrigRes(x1)}; const float res2{TrigRes(x2)}; data_out[idx] = res2 - res1; } } __global__ void PolyNormalFunc_2(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNormalRes(x1)}; const float res2{PolyNormalRes(x2)}; data_out[idx] = res2 - res1; } } __global__ void PolyNormalCachedFunc_2(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNormalCachedRes(x1)}; const float res2{PolyNormalCachedRes(x2)}; data_out[idx] = res2 - res1; } } __global__ void PolyNestedFunc_2(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNestedRes(x1)}; const float res2{PolyNestedRes(x2)}; data_out[idx] = res2 - res1; } } __global__ void PolyRootsFunc_2(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyRootsRes(x1)}; const float res2{PolyRootsRes(x2)}; data_out[idx] = res2 - 
res1; } } __global__ void TrigFunc_4(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{TrigRes(x1)}; const float res2{TrigRes(x2)}; const float res3{TrigRes(x1 + 1.0f)}; const float res4{TrigRes(x2 + 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3; } } __global__ void PolyNormalFunc_4(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNormalRes(x1)}; const float res2{PolyNormalRes(x2)}; const float res3{PolyNormalRes(x1 + 1.0f)}; const float res4{PolyNormalRes(x2 + 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3; } } __global__ void PolyNormalCachedFunc_4(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNormalCachedRes(x1)}; const float res2{PolyNormalCachedRes(x2)}; const float res3{PolyNormalCachedRes(x1 + 1.0f)}; const float res4{PolyNormalCachedRes(x2 + 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3; } } __global__ void PolyNestedFunc_4(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNestedRes(x1)}; const float res2{PolyNestedRes(x2)}; const float res3{PolyNestedRes(x1 + 1.0f)}; const float res4{PolyNestedRes(x2 + 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3; } } __global__ void PolyRootsFunc_4(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyRootsRes(x1)}; const float res2{PolyRootsRes(x2)}; const float res3{PolyRootsRes(x1 + 1.0f)}; const float res4{PolyRootsRes(x2 + 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3; } } __global__ void TrigFunc_6(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{TrigRes(x1)}; const float res2{TrigRes(x2)}; const float res3{TrigRes(x1 + 1.0f)}; const float res4{TrigRes(x2 + 1.0f)}; const float res5{TrigRes(x1 - 1.0f)}; const float res6{TrigRes(x2 - 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5; } } __global__ void PolyNormalFunc_6(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNormalRes(x1)}; const float res2{PolyNormalRes(x2)}; const float res3{PolyNormalRes(x1 + 1.0f)}; const float res4{PolyNormalRes(x2 + 1.0f)}; const float res5{PolyNormalRes(x1 - 1.0f)}; const float res6{PolyNormalRes(x2 - 1.0f)}; data_out[idx] = 
res2 - res1 + res4 - res3 + res6 - res5; } } __global__ void PolyNormalCachedFunc_6(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNormalCachedRes(x1)}; const float res2{PolyNormalCachedRes(x2)}; const float res3{PolyNormalCachedRes(x1 + 1.0f)}; const float res4{PolyNormalCachedRes(x2 + 1.0f)}; const float res5{PolyNormalCachedRes(x1 - 1.0f)}; const float res6{PolyNormalCachedRes(x2 - 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5; } } __global__ void PolyNestedFunc_6(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNestedRes(x1)}; const float res2{PolyNestedRes(x2)}; const float res3{PolyNestedRes(x1 + 1.0f)}; const float res4{PolyNestedRes(x2 + 1.0f)}; const float res5{PolyNestedRes(x1 - 1.0f)}; const float res6{PolyNestedRes(x2 - 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5; } } __global__ void PolyRootsFunc_6(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyRootsRes(x1)}; const float res2{PolyRootsRes(x2)}; const float res3{PolyRootsRes(x1 + 1.0f)}; const float res4{PolyRootsRes(x2 + 1.0f)}; const float res5{PolyRootsRes(x1 - 1.0f)}; const float res6{PolyRootsRes(x2 - 1.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5; } } __global__ void TrigFunc_8(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{TrigRes(x1)}; const float res2{TrigRes(x2)}; const float res3{TrigRes(x1 + 1.0f)}; const float res4{TrigRes(x2 + 1.0f)}; const float res5{TrigRes(x1 - 1.0f)}; const float res6{TrigRes(x2 - 1.0f)}; const float res7{TrigRes(x1 + 2.0f)}; const float res8{TrigRes(x2 + 2.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5 + res8 - res7; } } __global__ void PolyNormalFunc_8(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNormalRes(x1)}; const float res2{PolyNormalRes(x2)}; const float res3{PolyNormalRes(x1 + 1.0f)}; const float res4{PolyNormalRes(x2 + 1.0f)}; const float res5{PolyNormalRes(x1 - 1.0f)}; const float res6{PolyNormalRes(x2 - 1.0f)}; const float res7{PolyNormalRes(x1 + 2.0f)}; const float res8{PolyNormalRes(x2 + 2.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5 + res8 - res7; } } __global__ void PolyNormalCachedFunc_8(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNormalCachedRes(x1)}; const float 
res2{PolyNormalCachedRes(x2)}; const float res3{PolyNormalCachedRes(x1 + 1.0f)}; const float res4{PolyNormalCachedRes(x2 + 1.0f)}; const float res5{PolyNormalCachedRes(x1 - 1.0f)}; const float res6{PolyNormalCachedRes(x2 - 1.0f)}; const float res7{PolyNormalCachedRes(x1 + 2.0f)}; const float res8{PolyNormalCachedRes(x2 + 2.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5 + res8 - res7; } } __global__ void PolyNestedFunc_8(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyNestedRes(x1)}; const float res2{PolyNestedRes(x2)}; const float res3{PolyNestedRes(x1 + 1.0f)}; const float res4{PolyNestedRes(x2 + 1.0f)}; const float res5{PolyNestedRes(x1 - 1.0f)}; const float res6{PolyNestedRes(x2 - 1.0f)}; const float res7{PolyNestedRes(x1 + 2.0f)}; const float res8{PolyNestedRes(x2 + 2.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5 + res8 - res7; } } __global__ void PolyRootsFunc_8(const float * const data_in, float * const data_out, size_t const size) { const size_t idx{threadIdx.x + blockIdx.x * blockDim.x}; const size_t idx_2{idx * 2}; if (idx_2 < size) { const float x1{data_in[idx_2]}; const float x2{data_in[idx_2 + 1]}; const float res1{PolyRootsRes(x1)}; const float res2{PolyRootsRes(x2)}; const float res3{PolyRootsRes(x1 + 1.0f)}; const float res4{PolyRootsRes(x2 + 1.0f)}; const float res5{PolyRootsRes(x1 - 1.0f)}; const float res6{PolyRootsRes(x2 - 1.0f)}; const float res7{PolyRootsRes(x1 + 2.0f)}; const float res8{PolyRootsRes(x2 + 2.0f)}; data_out[idx] = res2 - res1 + res4 - res3 + res6 - res5 + res8 - res7; } }
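/* The FPO_n and SFU_n kernels above take no arguments and compute into a local
   that is never stored, so the compiler may strip much of the arithmetic; in
   any case, benchmarking them needs a host-side timer wrapped around repeated
   launches. A minimal cudaEvent timing sketch (EmptyKernel, timeKernelMs and
   launchEmpty are illustrative names, not part of math_kernels.hpp): */
#include <cstdio>

__global__ void EmptyKernel() { }

static float timeKernelMs(void (*launch)(), int reps)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < reps; ++i) launch();     // queue reps launches
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                  // wait for the last launch
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms / reps;                            // average per launch
}

static void launchEmpty() { EmptyKernel<<<1024, 256>>>(); }

int main()
{
    float ms = timeKernelMs(launchEmpty, 100);
    printf("average launch + execution time: %f ms\n", ms);
    return 0;
}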
5b8919e0780e09510785d4db3f76b539ceca7ab6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 2.1.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date August 2016

       @precisions normal z -> c d s

*/
#include "magmasparse_internal.h"

#define BLOCK_SIZE 512

template<bool betazero>
__global__ void
zmgeelltmv_kernel(
    int num_rows,
    int num_cols,
    int num_vecs,
    int num_cols_per_row,
    magmaDoubleComplex alpha,
    magmaDoubleComplex * dval,
    magma_index_t * dcolind,
    magmaDoubleComplex * dx,
    magmaDoubleComplex beta,
    magmaDoubleComplex * dy)
{
    extern __shared__ magmaDoubleComplex dot[];
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row < num_rows ) {
        for( int i=0; i<num_vecs; i++ )
            dot[ threadIdx.x + i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0);
        for ( int n = 0; n < num_cols_per_row; n++ ) {
            int col = dcolind [ num_rows * n + row ];
            magmaDoubleComplex val = dval [ num_rows * n + row ];
            for( int i=0; i<num_vecs; i++ )
                dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ];
        }
        for( int i=0; i<num_vecs; i++ ) {
            if (betazero) {
                dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha;
            } else {
                dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha
                                         + beta * dy [ row + i*num_cols ];
            }
        }
    }
}

/**
    Purpose
    -------
    This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
    num_vec vectors on the GPU. Input format is ELL.

    Arguments
    ---------
    @param[in]  transA       magma_trans_t           transposition parameter for A
    @param[in]  m            magma_int_t             number of rows in A
    @param[in]  n            magma_int_t             number of columns in A
    @param[in]  num_vecs     magma_int_t             number of vectors
    @param[in]  nnz_per_row  magma_int_t             number of elements in the longest row
    @param[in]  alpha        magmaDoubleComplex      scalar multiplier
    @param[in]  dval         magmaDoubleComplex_ptr  array containing values of A in ELL
    @param[in]  dcolind      magmaIndex_ptr          column indices of A in ELL
    @param[in]  dx           magmaDoubleComplex_ptr  input vector x
    @param[in]  beta         magmaDoubleComplex      scalar multiplier
    @param[out] dy           magmaDoubleComplex_ptr  input/output vector y
    @param[in]  queue        magma_queue_t           Queue to execute in.

    @ingroup magmasparse_zblas
    ********************************************************************/

extern "C" magma_int_t
magma_zmgeelltmv(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t num_vecs,
    magma_int_t nnz_per_row,
    magmaDoubleComplex alpha,
    magmaDoubleComplex_ptr dval,
    magmaIndex_ptr dcolind,
    magmaDoubleComplex_ptr dx,
    magmaDoubleComplex beta,
    magmaDoubleComplex_ptr dy,
    magma_queue_t queue )
{
    dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
    magma_int_t threads = BLOCK_SIZE;
    unsigned int MEM_SIZE = num_vecs * BLOCK_SIZE * sizeof( magmaDoubleComplex ); // num_vecs vectors
    if (beta == MAGMA_Z_ZERO) {
        hipLaunchKernelGGL(( zmgeelltmv_kernel<true>), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() ,
            m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
    } else {
        hipLaunchKernelGGL(( zmgeelltmv_kernel<false>), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() ,
            m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
    }
    return MAGMA_SUCCESS;
}
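/* The kernel above reads dval[ num_rows * n + row ] and dcolind[ num_rows * n + row ],
   i.e. ELL storage in which slot n of every row is stored contiguously. A
   host-only sketch of packing a small dense matrix into that layout, with zero
   padding for short rows; denseToEll is a hypothetical helper, not a MAGMA
   routine: */
#include <cstdio>
#include <vector>

static void denseToEll(const double *dense, int numRows, int numCols,
                       int slotsPerRow,
                       std::vector<double> &val, std::vector<int> &colind)
{
    val.assign((size_t)numRows * slotsPerRow, 0.0);
    colind.assign((size_t)numRows * slotsPerRow, 0);   // padded slots: value 0, column 0
    for (int row = 0; row < numRows; ++row) {
        int slot = 0;
        for (int col = 0; col < numCols && slot < slotsPerRow; ++col) {
            if (dense[row * numCols + col] != 0.0) {
                val[(size_t)slot * numRows + row]    = dense[row * numCols + col];
                colind[(size_t)slot * numRows + row] = col;
                ++slot;
            }
        }
    }
}

int main()
{
    const double dense[3 * 3] = { 4, 0, 1,
                                  0, 2, 0,
                                  3, 0, 5 };
    std::vector<double> val;
    std::vector<int> colind;
    denseToEll(dense, 3, 3, /*slotsPerRow=*/2, val, colind);
    for (size_t i = 0; i < val.size(); ++i)
        printf("slot-major entry %zu: val=%g col=%d\n", i, val[i], colind[i]);
    return 0;
}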
5b8919e0780e09510785d4db3f76b539ceca7ab6.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 template<bool betazero> __global__ void zmgeelltmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { extern __shared__ magmaDoubleComplex dot[]; int row = blockDim.x * blockIdx.x + threadIdx.x; if(row < num_rows ) { for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; magmaDoubleComplex val = dval [ num_rows * n + row ]; for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } for( int i=0; i<num_vecs; i++ ) { if (betazero) { dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] *alpha; } else { dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i*num_cols ]; } } } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELL. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaDoubleComplex ); // num_vecs vectors if (beta == MAGMA_Z_ZERO) { zmgeelltmv_kernel<true><<< grid, threads, MEM_SIZE, queue->cuda_stream() >>> ( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } else { zmgeelltmv_kernel<false><<< grid, threads, MEM_SIZE, queue->cuda_stream() >>> ( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } return MAGMA_SUCCESS; }
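/* The beta == MAGMA_Z_ZERO test above chooses a template instantiation at
   launch time, so the betazero=true kernel never reads the (possibly
   uninitialized) output vector. A minimal sketch of the same compile-time
   dispatch pattern with plain doubles; axpyKernel and launchAxpy are
   hypothetical names, not MAGMA routines: */
#include <cstdio>

template <bool BetaZero>
__global__ void axpyKernel(int n, double alpha, const double *x,
                           double beta, double *y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    if (BetaZero)
        y[i] = alpha * x[i];               // this instantiation never reads y[i]
    else
        y[i] = alpha * x[i] + beta * y[i];
}

static void launchAxpy(int n, double alpha, const double *x, double beta, double *y)
{
    int threads = 256;
    int blocks  = (n + threads - 1) / threads;
    if (beta == 0.0)
        axpyKernel<true><<<blocks, threads>>>(n, alpha, x, beta, y);
    else
        axpyKernel<false><<<blocks, threads>>>(n, alpha, x, beta, y);
}

int main()
{
    const int n = 8;
    double *x, *y;
    cudaMallocManaged(&x, n * sizeof(double));
    cudaMallocManaged(&y, n * sizeof(double));
    for (int i = 0; i < n; ++i) x[i] = i;
    launchAxpy(n, 2.0, x, 0.0, y);         // y is only written, never read
    cudaDeviceSynchronize();
    printf("y[3] = %g\n", y[3]);           // expect 6
    cudaFree(x);
    cudaFree(y);
    return 0;
}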
dcb96693ceaf495a757c30e3ac62edecce332979.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <string> #include <stdio.h> #include "archAPI.h" #include <stdlib.h> #include<string.h> #include <sys/resource.h> #include <stdint.h> #include <sys/sysinfo.h> #include <sys/time.h> #include "particle.h" //#include<cuda.h> //struct sysinfo { // long uptime; /* Seconds since boot */ // unsigned long loads[3]; /* 1, 5, and 15 minute load averages */ // unsigned long totalram; /* Total usable main memory size */ // unsigned long freeram; /* Available memory size */ // unsigned long sharedram; /* Amount of shared memory */ // unsigned long bufferram; /* Memory used by buffers */ // unsigned long totalswap; /* Total swap space size */ // unsigned long freeswap; /* swap space still available */ // unsigned short procs; /* Number of current processes */ // unsigned long totalhigh; /* Total high memory size */ // unsigned long freehigh; /* Available high memory size */ // unsigned int mem_unit; /* Memory unit size in bytes */ // char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding for libc5 */ // }; using namespace std; int setPrintfLimit() { size_t sizeP; printf("Particle size %lu %lu CurrentTensor %d short %d\n",sizeof(Particle),sizeof(Particle)/sizeof(double),sizeof(CurrentTensor),sizeof(char)); hipDeviceGetLimit(&sizeP,hipLimitPrintfFifoSize); printf("printf default limit %lu \n",sizeP/1024/1024); sizeP *= 10000; hipDeviceSetLimit(hipLimitPrintfFifoSize, sizeP); hipDeviceGetLimit(&sizeP,hipLimitPrintfFifoSize); printf("printf limit set to %lu \n",sizeP/1024/1024); return 0; } double get_meminfo(void) { FILE *f; char str[100]; int mem_free; double dmem; // return 0.0; system("free>&free_mem_out.dat"); if((f = fopen("free_mem_out.dat","rt")) == NULL) return 0.0; fgets(str,100,f); fgets(str,100,f); mem_free = atoi(str + 30); dmem = (((double)mem_free)/1024)/1024; return dmem; } double get_meminfo1(void) { double retval=0; char tmp[256]={0x0}; /* note= add a path to meminfo like /usr/bin/meminfo to match where meminfo lives on your system */ FILE *shellcommand=popen("meminfo","r"); while(fgets(tmp,sizeof(tmp),shellcommand)!=NULL) { if(memcmp(tmp,"Mem:",4)==0) { int wordcount=0; std::string delimiter=" "; char *p=strtok(tmp,delimiter.c_str()); while(*p) { wordcount++; if(wordcount==3) retval=atof(p); } } } pclose(shellcommand); return retval; } double CheckArraySilent (double* a, double* dbg_a,int size) { // Cell<Particle> c = (*AllCells)[0]; double diff = 0.0; for(int n = 0;n < size;n++) { diff += pow(a[n] - dbg_a[n],2.0); // if(fabs(a[n] - dbg_a[n]) > TOLERANCE) // { // // int3 i = c.getCellTripletNumber(n); // // } } return pow(diff/(size),0.5); } void get_load_data_file_names( string & t_jxfile, string & t_jyfile, string & t_jzfile, string & t_d_jxfile, string & t_d_jyfile, string & t_d_jzfile, string & t_np_jxfile, string & t_np_jyfile, string & t_np_jzfile, string & t_qxfile, string & t_qyfile, string & t_qzfile,int nt) { char d_exfile[100],d_eyfile[100],d_ezfile[100],d_hxfile[100],d_hyfile[100],d_hzfile[100]; char d_0exfile[100],d_0eyfile[100],d_0ezfile[100]; char jxfile[100],jyfile[100],jzfile[100]; char np_jxfile[100],np_jyfile[100],np_jzfile[100]; char np_exfile[100],np_eyfile[100],np_ezfile[100]; char d_jxfile[100],d_jyfile[100],d_jzfile[100]; char qxfile[100],qyfile[100],qzfile[100]; char pfile[100],nextpfile[100]; // char part_name[100]; sprintf(qxfile,"dnqx%06d.dat",nt); sprintf(qyfile,"dnqy%06d.dat",nt); sprintf(qzfile,"dnqz%06d.dat",nt); sprintf(d_exfile,"dnex%06d.dat",2*nt-1); 
sprintf(d_eyfile,"dney%06d.dat",2*nt-1); sprintf(d_ezfile,"dnez%06d.dat",2*nt-1); sprintf(d_0exfile,"dnex%06d.dat",2*nt-2); sprintf(d_0eyfile,"dney%06d.dat",2*nt-2); sprintf(d_0ezfile,"dnez%06d.dat",2*nt-2); sprintf(d_hxfile,"dnhx%06d.dat",2*nt-1); sprintf(d_hyfile,"dnhy%06d.dat",2*nt-1); puts(d_hyfile); sprintf(d_hzfile,"dnhz%06d.dat",2*nt-1); sprintf(jxfile,"dnjx%06d.dat",2*nt); sprintf(jyfile,"dnjy%06d.dat",2*nt); sprintf(jzfile,"dnjz%06d.dat",2*nt); sprintf(d_jxfile,"npjx%06d.dat",2*nt); sprintf(d_jyfile,"npjy%06d.dat",2*nt); sprintf(d_jzfile,"npjz%06d.dat",2*nt); sprintf(np_jxfile,"npjx%06d.dat",2*nt); sprintf(np_jyfile,"npjy%06d.dat",2*nt); sprintf(np_jzfile,"npjz%06d.dat",2*nt); sprintf(np_exfile,"exlg%03d.dat",2*nt); sprintf(np_eyfile,"eylg%03d.dat",2*nt); sprintf(np_ezfile,"ezlg%03d.dat",2*nt); sprintf(pfile, "part%06d000.dat",nt); sprintf(nextpfile,"part%06d000.dat",nt+2); t_jxfile = jxfile; t_jyfile = jyfile; t_jzfile = jzfile; t_d_jxfile = d_jxfile; t_d_jyfile = d_jyfile; t_d_jzfile = d_jzfile; t_np_jxfile = np_jxfile; t_np_jyfile = np_jyfile; t_np_jzfile = np_jzfile; t_qxfile = qxfile; t_qyfile = qyfile; t_qzfile = qzfile; } void hipMalloc3D(double **X,double **Y,double**Z,int nx,int ny,int nz) { hipMalloc(X,sizeof(double)*(nx+2)*(ny+2)*(nz+2)); hipMalloc(Y,sizeof(double)*(nx+2)*(ny+2)*(nz+2)); hipMalloc(Z,sizeof(double)*(nx+2)*(ny+2)*(nz+2)); } void copyFieldsToGPU( double *d_Ex,double *d_Ey,double *d_Ez, double *d_Hx,double *d_Hy,double *d_Hz, double *d_Jx,double *d_Jy,double *d_Jz, double *d_npJx,double *d_npJy,double *d_npJz, double *d_Qx,double *d_Qy,double *d_Qz, double *Ex,double *Ey,double *Ez, double *Hx,double *Hy,double *Hz, double *Jx,double *Jy,double *Jz, double *npJx,double *npJy,double *npJz, double *Qx,double *Qy,double *Qz, int Nx,int Ny,int Nz ) { int err; err = MemoryCopy(d_Ex,Ex,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != hipSuccess) { printf("1copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Ey,Ey,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != hipSuccess) { printf("2copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Ez,Ez,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != hipSuccess) { printf("3copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Hx,Hx,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != hipSuccess) { printf("4copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Hy,Hy,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != hipSuccess) { printf("5copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Hz,Hz,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != hipSuccess) { printf("6copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Jx,Jx,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != hipSuccess) { printf("7copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Jy,Jy,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != hipSuccess) { printf("8copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Jz,Jz,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != hipSuccess) { printf("9copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_npJx,npJx,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != hipSuccess) { printf("10copyFieldsToGPU 
err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_npJy,npJy,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != hipSuccess) { printf("11copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_npJz,npJz,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != hipSuccess) { printf("12copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Qx,Qx,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != hipSuccess) { printf("13copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Qy,Qy,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != hipSuccess) { printf("14copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Qz,Qz,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != hipSuccess) { printf("15copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } } void InitGPUFields( double **d_Ex,double **d_Ey,double **d_Ez, double **d_Hx,double **d_Hy,double **d_Hz, double **d_Jx,double **d_Jy,double **d_Jz, double **d_npJx,double **d_npJy,double **d_npJz, double **d_Qx,double **d_Qy,double **d_Qz, double *Ex,double *Ey,double *Ez, double *Hx,double *Hy,double *Hz, double *Jx,double *Jy,double *Jz, double *npJx,double *npJy,double *npJz, double *Qx,double *Qy,double *Qz, int Nx,int Ny,int Nz ) { hipMalloc3D(d_Ex,d_Ey,d_Ez,Nx,Ny,Nz); hipMalloc3D(d_Hx,d_Hy,d_Hz,Nx,Ny,Nz); hipMalloc3D(d_Jx,d_Jy,d_Jz,Nx,Ny,Nz); hipMalloc3D(d_npJx,d_npJy,d_npJz,Nx,Ny,Nz); hipMalloc3D(d_Qx,d_Qy,d_Qz,Nx,Ny,Nz); copyFieldsToGPU( *d_Ex,*d_Ey,*d_Ez, *d_Hx,*d_Hy,*d_Hz, *d_Jx,*d_Jy,*d_Jz, *d_npJx,*d_npJy,*d_npJz, *d_Qx,*d_Qy,*d_Qz, Ex,Ey,Ez, Hx,Hy,Hz, Jx,Jy,Jz, npJx,npJy,npJz, Qx,Qy,Qz, Nx,Ny,Nz ); }
dcb96693ceaf495a757c30e3ac62edecce332979.cu
#include <math.h> #include <string> #include <stdio.h> #include "archAPI.h" #include <stdlib.h> #include<string.h> #include <sys/resource.h> #include <stdint.h> #include <sys/sysinfo.h> #include <sys/time.h> #include "particle.h" //#include<cuda.h> //struct sysinfo { // long uptime; /* Seconds since boot */ // unsigned long loads[3]; /* 1, 5, and 15 minute load averages */ // unsigned long totalram; /* Total usable main memory size */ // unsigned long freeram; /* Available memory size */ // unsigned long sharedram; /* Amount of shared memory */ // unsigned long bufferram; /* Memory used by buffers */ // unsigned long totalswap; /* Total swap space size */ // unsigned long freeswap; /* swap space still available */ // unsigned short procs; /* Number of current processes */ // unsigned long totalhigh; /* Total high memory size */ // unsigned long freehigh; /* Available high memory size */ // unsigned int mem_unit; /* Memory unit size in bytes */ // char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding for libc5 */ // }; using namespace std; int setPrintfLimit() { size_t sizeP; printf("Particle size %lu %lu CurrentTensor %d short %d\n",sizeof(Particle),sizeof(Particle)/sizeof(double),sizeof(CurrentTensor),sizeof(char)); cudaDeviceGetLimit(&sizeP,cudaLimitPrintfFifoSize); printf("printf default limit %lu \n",sizeP/1024/1024); sizeP *= 10000; cudaDeviceSetLimit(cudaLimitPrintfFifoSize, sizeP); cudaDeviceGetLimit(&sizeP,cudaLimitPrintfFifoSize); printf("printf limit set to %lu \n",sizeP/1024/1024); return 0; } double get_meminfo(void) { FILE *f; char str[100]; int mem_free; double dmem; // return 0.0; system("free>&free_mem_out.dat"); if((f = fopen("free_mem_out.dat","rt")) == NULL) return 0.0; fgets(str,100,f); fgets(str,100,f); mem_free = atoi(str + 30); dmem = (((double)mem_free)/1024)/1024; return dmem; } double get_meminfo1(void) { double retval=0; char tmp[256]={0x0}; /* note= add a path to meminfo like /usr/bin/meminfo to match where meminfo lives on your system */ FILE *shellcommand=popen("meminfo","r"); while(fgets(tmp,sizeof(tmp),shellcommand)!=NULL) { if(memcmp(tmp,"Mem:",4)==0) { int wordcount=0; std::string delimiter=" "; char *p=strtok(tmp,delimiter.c_str()); while(*p) { wordcount++; if(wordcount==3) retval=atof(p); } } } pclose(shellcommand); return retval; } double CheckArraySilent (double* a, double* dbg_a,int size) { // Cell<Particle> c = (*AllCells)[0]; double diff = 0.0; for(int n = 0;n < size;n++) { diff += pow(a[n] - dbg_a[n],2.0); // if(fabs(a[n] - dbg_a[n]) > TOLERANCE) // { // // int3 i = c.getCellTripletNumber(n); // // } } return pow(diff/(size),0.5); } void get_load_data_file_names( string & t_jxfile, string & t_jyfile, string & t_jzfile, string & t_d_jxfile, string & t_d_jyfile, string & t_d_jzfile, string & t_np_jxfile, string & t_np_jyfile, string & t_np_jzfile, string & t_qxfile, string & t_qyfile, string & t_qzfile,int nt) { char d_exfile[100],d_eyfile[100],d_ezfile[100],d_hxfile[100],d_hyfile[100],d_hzfile[100]; char d_0exfile[100],d_0eyfile[100],d_0ezfile[100]; char jxfile[100],jyfile[100],jzfile[100]; char np_jxfile[100],np_jyfile[100],np_jzfile[100]; char np_exfile[100],np_eyfile[100],np_ezfile[100]; char d_jxfile[100],d_jyfile[100],d_jzfile[100]; char qxfile[100],qyfile[100],qzfile[100]; char pfile[100],nextpfile[100]; // char part_name[100]; sprintf(qxfile,"dnqx%06d.dat",nt); sprintf(qyfile,"dnqy%06d.dat",nt); sprintf(qzfile,"dnqz%06d.dat",nt); sprintf(d_exfile,"dnex%06d.dat",2*nt-1); sprintf(d_eyfile,"dney%06d.dat",2*nt-1); 
sprintf(d_ezfile,"dnez%06d.dat",2*nt-1); sprintf(d_0exfile,"dnex%06d.dat",2*nt-2); sprintf(d_0eyfile,"dney%06d.dat",2*nt-2); sprintf(d_0ezfile,"dnez%06d.dat",2*nt-2); sprintf(d_hxfile,"dnhx%06d.dat",2*nt-1); sprintf(d_hyfile,"dnhy%06d.dat",2*nt-1); puts(d_hyfile); sprintf(d_hzfile,"dnhz%06d.dat",2*nt-1); sprintf(jxfile,"dnjx%06d.dat",2*nt); sprintf(jyfile,"dnjy%06d.dat",2*nt); sprintf(jzfile,"dnjz%06d.dat",2*nt); sprintf(d_jxfile,"npjx%06d.dat",2*nt); sprintf(d_jyfile,"npjy%06d.dat",2*nt); sprintf(d_jzfile,"npjz%06d.dat",2*nt); sprintf(np_jxfile,"npjx%06d.dat",2*nt); sprintf(np_jyfile,"npjy%06d.dat",2*nt); sprintf(np_jzfile,"npjz%06d.dat",2*nt); sprintf(np_exfile,"exlg%03d.dat",2*nt); sprintf(np_eyfile,"eylg%03d.dat",2*nt); sprintf(np_ezfile,"ezlg%03d.dat",2*nt); sprintf(pfile, "part%06d000.dat",nt); sprintf(nextpfile,"part%06d000.dat",nt+2); t_jxfile = jxfile; t_jyfile = jyfile; t_jzfile = jzfile; t_d_jxfile = d_jxfile; t_d_jyfile = d_jyfile; t_d_jzfile = d_jzfile; t_np_jxfile = np_jxfile; t_np_jyfile = np_jyfile; t_np_jzfile = np_jzfile; t_qxfile = qxfile; t_qyfile = qyfile; t_qzfile = qzfile; } void cudaMalloc3D(double **X,double **Y,double**Z,int nx,int ny,int nz) { cudaMalloc(X,sizeof(double)*(nx+2)*(ny+2)*(nz+2)); cudaMalloc(Y,sizeof(double)*(nx+2)*(ny+2)*(nz+2)); cudaMalloc(Z,sizeof(double)*(nx+2)*(ny+2)*(nz+2)); } void copyFieldsToGPU( double *d_Ex,double *d_Ey,double *d_Ez, double *d_Hx,double *d_Hy,double *d_Hz, double *d_Jx,double *d_Jy,double *d_Jz, double *d_npJx,double *d_npJy,double *d_npJz, double *d_Qx,double *d_Qy,double *d_Qz, double *Ex,double *Ey,double *Ez, double *Hx,double *Hy,double *Hz, double *Jx,double *Jy,double *Jz, double *npJx,double *npJy,double *npJz, double *Qx,double *Qy,double *Qz, int Nx,int Ny,int Nz ) { int err; err = MemoryCopy(d_Ex,Ex,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != cudaSuccess) { printf("1copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Ey,Ey,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != cudaSuccess) { printf("2copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Ez,Ez,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != cudaSuccess) { printf("3copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Hx,Hx,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != cudaSuccess) { printf("4copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Hy,Hy,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != cudaSuccess) { printf("5copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Hz,Hz,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != cudaSuccess) { printf("6copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Jx,Jx,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != cudaSuccess) { printf("7copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Jy,Jy,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != cudaSuccess) { printf("8copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Jz,Jz,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != cudaSuccess) { printf("9copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_npJx,npJx,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != cudaSuccess) { printf("10copyFieldsToGPU err %d %s 
\n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_npJy,npJy,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != cudaSuccess) { printf("11copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_npJz,npJz,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != cudaSuccess) { printf("12copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Qx,Qx,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != cudaSuccess) { printf("13copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Qy,Qy,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != cudaSuccess) { printf("14copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } err = MemoryCopy(d_Qz,Qz,sizeof(double)*(Nx+2)*(Ny+2)*(Nz+2),HOST_TO_DEVICE); if(err != cudaSuccess) { printf("15copyFieldsToGPU err %d %s \n",err,getErrorString(err)); exit(0); } } void InitGPUFields( double **d_Ex,double **d_Ey,double **d_Ez, double **d_Hx,double **d_Hy,double **d_Hz, double **d_Jx,double **d_Jy,double **d_Jz, double **d_npJx,double **d_npJy,double **d_npJz, double **d_Qx,double **d_Qy,double **d_Qz, double *Ex,double *Ey,double *Ez, double *Hx,double *Hy,double *Hz, double *Jx,double *Jy,double *Jz, double *npJx,double *npJy,double *npJz, double *Qx,double *Qy,double *Qz, int Nx,int Ny,int Nz ) { cudaMalloc3D(d_Ex,d_Ey,d_Ez,Nx,Ny,Nz); cudaMalloc3D(d_Hx,d_Hy,d_Hz,Nx,Ny,Nz); cudaMalloc3D(d_Jx,d_Jy,d_Jz,Nx,Ny,Nz); cudaMalloc3D(d_npJx,d_npJy,d_npJz,Nx,Ny,Nz); cudaMalloc3D(d_Qx,d_Qy,d_Qz,Nx,Ny,Nz); copyFieldsToGPU( *d_Ex,*d_Ey,*d_Ez, *d_Hx,*d_Hy,*d_Hz, *d_Jx,*d_Jy,*d_Jz, *d_npJx,*d_npJy,*d_npJz, *d_Qx,*d_Qy,*d_Qz, Ex,Ey,Ez, Hx,Hy,Hz, Jx,Jy,Jz, npJx,npJy,npJz, Qx,Qy,Qz, Nx,Ny,Nz ); }
1000e3d08367cc50ff887224a8f3f53ae6deb18b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #include "edwards.h" #include "twisted.h" // Vrt vsek z NAF rozvoje __device__ int build(char* bits,unsigned int start,unsigned int end) { int ret = 0; for (unsigned int i = start;i <= end;i++) { ret += bits[i]*(1 << (i-start)); } return ret; } // Vpoet pomoc sliding window pro kivky s a=1 __global__ void slidingWindowE(void* pY,void* pPc,void* swAux,void* swCoeff) { PREPARE(); VOL digit_t* Qd = ((digit_t*)pY)+idx; // Nakoprovn pracovnch dat pro Y c_x1[threadIdx.x] = *(Qd+threadIdx.x+0*NB_DIGITS); // prvnch 32 cifer pat k X c_y1[threadIdx.x] = *(Qd+threadIdx.x+1*NB_DIGITS); // dalch 32 cifer pat k Y c_z1[threadIdx.x] = *(Qd+threadIdx.x+2*NB_DIGITS); // dalch 32 k souadnici Z c_t1[threadIdx.x] = *(Qd+threadIdx.x+3*NB_DIGITS); // ... a posledn k souadnici T char* Cf = (char*)swCoeff; for (int i = ax->nafLen-1,u,s = 0;i >= 0;) { if (Cf[i] == 0) { edwardsDbl(); --i; } else { s = i - ax->windowSz + 1; s = s > 0 ? s : 0; while (!Cf[s]) ++s; for (int h = 1;h <= i-s+1;++h) { edwardsDbl(); } u = build(Cf,s,i); if (u > 0){ Qd = ((digit_t*)pPc)+idx+((u-1)/2)*NUM_CURVES*4*NB_DIGITS; edwardsAdd(); } else { Qd = ((digit_t*)pPc)+idx+((-u-1)/2)*NUM_CURVES*4*NB_DIGITS; edwardsSub(); } i = s-1; } } // Nakoprovn pracovnch dat zptky do Y Qd = ((digit_t*)pY) + idx; *(Qd+threadIdx.x+0*NB_DIGITS) = c_x1[threadIdx.x]; // prvnch 32 cifer pat k X *(Qd+threadIdx.x+1*NB_DIGITS) = c_y1[threadIdx.x]; // dalch 32 cifer pat k Y *(Qd+threadIdx.x+2*NB_DIGITS) = c_z1[threadIdx.x]; // dalch 32 k souadnici Z *(Qd+threadIdx.x+3*NB_DIGITS) = c_t1[threadIdx.x]; // ... a posledn k souadnici T __syncthreads(); } // Vpoet pomoc sliding window pro kivky s a=-1 __global__ void slidingWindowT(void* pY,void* pPc,void* swAux,void* swCoeff) { PREPARE(); VOL digit_t* Qd = ((digit_t*)pY)+idx; // Nakoprovn pracovnch dat pro Y c_x1[threadIdx.x] = *(Qd+threadIdx.x+0*NB_DIGITS); // prvnch 32 cifer pat k X c_y1[threadIdx.x] = *(Qd+threadIdx.x+1*NB_DIGITS); // dalch 32 cifer pat k Y c_z1[threadIdx.x] = *(Qd+threadIdx.x+2*NB_DIGITS); // dalch 32 k souadnici Z c_t1[threadIdx.x] = *(Qd+threadIdx.x+3*NB_DIGITS); // ... a posledn k souadnici T char* Cf = (char*)swCoeff; for (int i = ax->nafLen-1,u,s = 0;i >= 0;) { if (Cf[i] == 0) { twistedDbl(); --i; } else { s = i - ax->windowSz + 1; s = s > 0 ? s : 0; while (!Cf[s]) ++s; for (int h = 1;h <= i-s+1;++h) { twistedDbl(); } u = build(Cf,s,i); if (u > 0){ Qd = ((digit_t*)pPc)+idx+((u-1)/2)*NUM_CURVES*4*NB_DIGITS; twistedAdd(); } else { Qd = ((digit_t*)pPc)+idx+((-u-1)/2)*NUM_CURVES*4*NB_DIGITS; twistedSub(); } i = s-1; } } // Nakoprovn pracovnch dat zptky do Y Qd = ((digit_t*)pY) + idx; *(Qd+threadIdx.x+0*NB_DIGITS) = c_x1[threadIdx.x]; // prvnch 32 cifer pat k X *(Qd+threadIdx.x+1*NB_DIGITS) = c_y1[threadIdx.x]; // dalch 32 cifer pat k Y *(Qd+threadIdx.x+2*NB_DIGITS) = c_z1[threadIdx.x]; // dalch 32 k souadnici Z *(Qd+threadIdx.x+3*NB_DIGITS) = c_t1[threadIdx.x]; // ... a posledn k souadnici T __syncthreads(); } __global__ void precomputeE(void* pX,void* pCube,void* swAux) { PREPARE(); VOL digit_t* Qd = ((digit_t*)pX) + idx; VOL digit_t* out = ((digit_t*)pCube) + idx; // Nakoprovn pracovnch dat pro Y c_x1[threadIdx.x] = *(Qd+threadIdx.x+0*NB_DIGITS); // prvnch 32 cifer pat k X c_y1[threadIdx.x] = *(Qd+threadIdx.x+1*NB_DIGITS); // dalch 32 cifer pat k Y c_z1[threadIdx.x] = *(Qd+threadIdx.x+2*NB_DIGITS); // dalch 32 k souadnici Z c_t1[threadIdx.x] = *(Qd+threadIdx.x+3*NB_DIGITS); // ... 
a posledn k souadnici T edwardsDbl(); for (int i = 1; i < (1 << (ax->windowSz-1));++i) { edwardsAdd(); // Vsledek na sv msto *(out+threadIdx.x+0*NB_DIGITS) = c_x1[threadIdx.x]; // prvnch 32 cifer pat k X *(out+threadIdx.x+1*NB_DIGITS) = c_y1[threadIdx.x]; // dalch 32 cifer pat k Y *(out+threadIdx.x+2*NB_DIGITS) = c_z1[threadIdx.x]; // dalch 32 k souadnici Z *(out+threadIdx.x+3*NB_DIGITS) = c_t1[threadIdx.x]; // ... a posledn k souadnici T out += NUM_CURVES*4*NB_DIGITS; __syncthreads(); edwardsAdd(); } } __global__ void precomputeT(void* pX,void* pCube,void* swAux) { PREPARE(); VOL digit_t* Qd = ((digit_t*)pX) + idx; VOL digit_t* out = ((digit_t*)pCube) + idx; // Nakoprovn pracovnch dat pro Y c_x1[threadIdx.x] = *(Qd+threadIdx.x+0*NB_DIGITS); // prvnch 32 cifer pat k X c_y1[threadIdx.x] = *(Qd+threadIdx.x+1*NB_DIGITS); // dalch 32 cifer pat k Y c_z1[threadIdx.x] = *(Qd+threadIdx.x+2*NB_DIGITS); // dalch 32 k souadnici Z c_t1[threadIdx.x] = *(Qd+threadIdx.x+3*NB_DIGITS); // ... a posledn k souadnici T twistedDbl(); for (int i = 1; i < (1 << (ax->windowSz-1));++i) { twistedAdd(); // Vsledek na sv msto *(out+threadIdx.x+0*NB_DIGITS) = c_x1[threadIdx.x]; // prvnch 32 cifer pat k X *(out+threadIdx.x+1*NB_DIGITS) = c_y1[threadIdx.x]; // dalch 32 cifer pat k Y *(out+threadIdx.x+2*NB_DIGITS) = c_z1[threadIdx.x]; // dalch 32 k souadnici Z *(out+threadIdx.x+3*NB_DIGITS) = c_t1[threadIdx.x]; // ... a posledn k souadnici T out += NUM_CURVES*4*NB_DIGITS; __syncthreads(); twistedAdd(); } } hipError_t computeMixed(const ComputeConfig& cfg,const ExtendedPoint* neutral,ExtendedPoint* initPoints,const NAF& coeff) { const int PRECOMP_SZ = (1 << (cfg.windowSz-1)); // Poet bod, kter je nutn pedpotat const int NUM_CURVES = cfg.numCurves; // Poet natench kivek int blcks = NUM_CURVES/CURVES_PER_BLOCK; const int NUM_BLOCKS = (blcks == 0 ? 
1 : blcks)/2; // Poet pouitch blok const int USE_DEVICE = cfg.deviceId; // ID zazen, kter bude pouito int devs = 0; gpuErrchk(hipGetDeviceCount(&devs)); if (USE_DEVICE < 0 || USE_DEVICE >= devs) { fprintf(stderr,"Launch failed: invalid device ID.\n"); return hipErrorInvalidDevice; } hipEvent_t start,stop; float totalTime = 0; void *swQw = NULL,*swPc = NULL,*swAx = NULL,*swCf = NULL; gpuErrchk(hipSetDevice(USE_DEVICE)); // Zjitn vlastnost zazen hipDeviceProp_t prop; gpuErrchk(hipGetDeviceProperties(&prop, 0)); // Oven, e Compute Capability je alespo 2.0 if (prop.major < 2) { fprintf(stderr,"Launch failed: compute capability of the device must be at least 2.0.\n"); return hipErrorInitializationError; } // Oven, e se vechny kivky vejdou do sdlen pamti if ((int)prop.sharedMemPerBlock*prop.multiProcessorCount < NUM_CURVES*CURVE_MEMORY_SIZE) { fprintf(stderr,"Launch failed: cannot fit curves into the shared memory.\n"); return hipErrorLaunchOutOfResources; } // Vytvoit eventy pro men asu gpuErrchk(hipEventCreate(&start)); gpuErrchk(hipEventCreate(&stop)); // Alokace potebnch dat cuda_Malloc((void**)&swPc,NUM_CURVES*PRECOMP_SZ*4*MAX_BYTES); // Pedpotan body cuda_Malloc((void**)&swQw,NUM_CURVES*4*MAX_BYTES); // Pomocn bod cuda_Malloc((void**)&swAx,sizeof(ComputeConfig)); // Pomocn struktura cuda_Malloc((void**)&swCf,cfg.nafLen); // NAF rozvoj koeficientu // Pomocn struktura cuda_Memcpy(swAx,(void*)&cfg,sizeof(ComputeConfig),hipMemcpyHostToDevice); // NAF rozvoj koeficientu cuda_Memcpy(swCf,(void*)coeff.bits,cfg.nafLen,hipMemcpyHostToDevice); // Poten body VOL digit_t* iter = (digit_t*)swPc; for (int i = 0;i < NUM_CURVES;i++){ cuda_Memcpy((void*)(iter+0*NB_DIGITS),(void*)initPoints[i].X,MAX_BYTES,hipMemcpyHostToDevice); cuda_Memcpy((void*)(iter+1*NB_DIGITS),(void*)initPoints[i].Y,MAX_BYTES,hipMemcpyHostToDevice); cuda_Memcpy((void*)(iter+2*NB_DIGITS),(void*)initPoints[i].Z,MAX_BYTES,hipMemcpyHostToDevice); cuda_Memcpy((void*)(iter+3*NB_DIGITS),(void*)initPoints[i].T,MAX_BYTES,hipMemcpyHostToDevice); iter += 4*NB_DIGITS; } // Konfigurace kernel dim3 threadsPerBlock(NB_DIGITS,CURVES_PER_BLOCK); printf("Device name and ID : %s (%d)\n",prop.name,USE_DEVICE); printf("Device compute capability: %d.%d\n",prop.major,prop.minor); printf("Execution configuration: %d x %d x %d\n",NUM_BLOCKS,CURVES_PER_BLOCK,NB_DIGITS); printf("--------------------------\n"); // Vytvoen stream hipStream_t edwardsStream,twistedStream; gpuErrchk(hipStreamCreate(&edwardsStream)); gpuErrchk(hipStreamCreate(&twistedStream)); // Startovac adresy pro stream s Edwardsovmi kivkami void* swPcE = ((digit_t*)swPc)+NUM_CURVES*2*NB_DIGITS; VOL digit_t* iterE = iter+NUM_CURVES*2*NB_DIGITS; // Dal pedpotan body START_MEASURE(start); hipLaunchKernelGGL(( precomputeT), dim3(NUM_BLOCKS),dim3(threadsPerBlock),0,twistedStream, (void*)swPc, (void*)iter, (void*)swAx); hipLaunchKernelGGL(( precomputeE), dim3(NUM_BLOCKS),dim3(threadsPerBlock),0,edwardsStream, (void*)swPcE,(void*)iterE,(void*)swAx); STOP_MEASURE("Precomputation phase",start,stop,totalTime); // Do swQw nakoprovat neutrln prvek iter = (digit_t*)swQw; for (int i = 0;i < NUM_CURVES;++i){ cuda_Memcpy((void*)(iter+0*NB_DIGITS),(void*)neutral->X,MAX_BYTES,hipMemcpyHostToDevice); cuda_Memcpy((void*)(iter+1*NB_DIGITS),(void*)neutral->Y,MAX_BYTES,hipMemcpyHostToDevice); cuda_Memcpy((void*)(iter+2*NB_DIGITS),(void*)neutral->Z,MAX_BYTES,hipMemcpyHostToDevice); cuda_Memcpy((void*)(iter+3*NB_DIGITS),(void*)neutral->T,MAX_BYTES,hipMemcpyHostToDevice); iter += 4*NB_DIGITS; } 
gpuErrchk(hipDeviceSynchronize()); // Startovac adresy pro stream s Edwardsovmi kivkami swPcE = ((digit_t*)swPc)+NUM_CURVES*2*NB_DIGITS; void* swQwE = ((digit_t*)swQw)+NUM_CURVES*2*NB_DIGITS; float pct = totalTime; START_MEASURE(start); hipLaunchKernelGGL(( slidingWindowT), dim3(NUM_BLOCKS),dim3(threadsPerBlock),0,twistedStream, (void*)swQw, (void*)swPc, (void*)swAx,(void*)swCf); hipLaunchKernelGGL(( slidingWindowE), dim3(NUM_BLOCKS),dim3(threadsPerBlock),0,edwardsStream, (void*)swQwE,(void*)swPcE,(void*)swAx,(void*)swCf); STOP_MEASURE("Computation phase",start,stop,totalTime); printf("--------------------------\n"); printf("Total time: %.3f ms\n",totalTime); cfg.cudaRunTime = totalTime-pct; gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipStreamDestroy(twistedStream)); gpuErrchk(hipStreamDestroy(edwardsStream)); // Nakoprovat vsledky zptky do pamti potae iter = (digit_t*)swQw; for (int i = 0;i < NUM_CURVES;i++){ cuda_Memcpy((void*)initPoints[i].X,(void*)(iter+0*NB_DIGITS),MAX_BYTES,hipMemcpyDeviceToHost); cuda_Memcpy((void*)initPoints[i].Y,(void*)(iter+1*NB_DIGITS),MAX_BYTES,hipMemcpyDeviceToHost); cuda_Memcpy((void*)initPoints[i].Z,(void*)(iter+2*NB_DIGITS),MAX_BYTES,hipMemcpyDeviceToHost); cuda_Memcpy((void*)initPoints[i].T,(void*)(iter+3*NB_DIGITS),MAX_BYTES,hipMemcpyDeviceToHost); iter += 4*NB_DIGITS; } // Zkontroluj chyby hipError_t cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) fprintf(stderr, "Launch failed: %s\n", hipGetErrorString(cudaStatus)); // Synchronizovat ve cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) fprintf(stderr, "Launch failed: %s\n", hipGetErrorString(cudaStatus)); // Uvolnit pam cuda_Free(swAx); cuda_Free(swQw); cuda_Free(swPc); cuda_Free(swCf); gpuErrchk(hipEventDestroy(start)); gpuErrchk(hipEventDestroy(stop)); cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!\n"); return cudaStatus; } return cudaStatus; } hipError_t computeSingle(const ComputeConfig& cfg,const ExtendedPoint* neutral,ExtendedPoint* initPoints,const NAF& coeff) { const int PRECOMP_SZ = (1 << (cfg.windowSz-1)); // Poet bod, kter je nutn pedpotat const int NUM_CURVES = cfg.numCurves; // Poet natench kivek int blcks = NUM_CURVES/CURVES_PER_BLOCK; const int NUM_BLOCKS = (blcks == 0 ? 
1 : blcks); // Poet pouitch blok const int USE_DEVICE = cfg.deviceId; // ID zazen, kter bude pouito int devs = 0; gpuErrchk(hipGetDeviceCount(&devs)); if (USE_DEVICE < 0 || USE_DEVICE >= devs) { fprintf(stderr,"Launch failed: invalid device ID.\n"); return hipErrorInvalidDevice; } hipEvent_t start,stop; float totalTime = 0; void *swQw = NULL,*swPc = NULL,*swAx = NULL,*swCf = NULL; gpuErrchk(hipSetDevice(USE_DEVICE)); // Zjitn vlastnost zazen hipDeviceProp_t prop; gpuErrchk(hipGetDeviceProperties(&prop, 0)); // Oven, e Compute Capability je alespo 2.0 if (prop.major < 2) { fprintf(stderr,"Launch failed: compute capability of the device must be at least 2.0.\n"); return hipErrorInitializationError; } // Oven, e se vechny kivky vejdou do sdlen pamti if ((int)prop.sharedMemPerBlock*prop.multiProcessorCount < NUM_CURVES*CURVE_MEMORY_SIZE) { fprintf(stderr,"Launch failed: cannot fit curves into the shared memory.\n"); return hipErrorLaunchOutOfResources; } gpuErrchk(hipEventCreate(&start)); gpuErrchk(hipEventCreate(&stop)); // Alokace potebnch dat cuda_Malloc((void**)&swPc,NUM_CURVES*PRECOMP_SZ*4*MAX_BYTES); // Pedpotan body cuda_Malloc((void**)&swQw,NUM_CURVES*4*MAX_BYTES); // Pomocn bod cuda_Malloc((void**)&swAx,sizeof(ComputeConfig)); // Pomocn struktura cuda_Malloc((void**)&swCf,cfg.nafLen); // NAF rozvoj koeficientu // Pomocn struktura cuda_Memcpy(swAx,(void*)&cfg,sizeof(ComputeConfig),hipMemcpyHostToDevice); // NAF rozvoj koeficientu cuda_Memcpy(swCf,(void*)coeff.bits,cfg.nafLen,hipMemcpyHostToDevice); // Poten body VOL digit_t* iter = (digit_t*)swPc; for (int i = 0;i < NUM_CURVES;i++){ cuda_Memcpy((void*)(iter+0*NB_DIGITS),(void*)initPoints[i].X,MAX_BYTES,hipMemcpyHostToDevice); cuda_Memcpy((void*)(iter+1*NB_DIGITS),(void*)initPoints[i].Y,MAX_BYTES,hipMemcpyHostToDevice); cuda_Memcpy((void*)(iter+2*NB_DIGITS),(void*)initPoints[i].Z,MAX_BYTES,hipMemcpyHostToDevice); cuda_Memcpy((void*)(iter+3*NB_DIGITS),(void*)initPoints[i].T,MAX_BYTES,hipMemcpyHostToDevice); iter += 4*NB_DIGITS; } // Konfigurace kernel dim3 threadsPerBlock(NB_DIGITS,CURVES_PER_BLOCK); printf("Device name and ID : %s (%d)\n",prop.name,USE_DEVICE); printf("Execution configuration: %d x %d x %d\n",NUM_BLOCKS,CURVES_PER_BLOCK,NB_DIGITS); printf("--------------------------\n"); // Dal pedpotan body if (cfg.minus1) { START_MEASURE(start); hipLaunchKernelGGL(( precomputeT), dim3(NUM_BLOCKS),dim3(threadsPerBlock), 0, 0, (void*)swPc,(void*)iter,(void*)swAx); STOP_MEASURE("Precomputation phase",start,stop,totalTime); } else { START_MEASURE(start); hipLaunchKernelGGL(( precomputeE), dim3(NUM_BLOCKS),dim3(threadsPerBlock), 0, 0, (void*)swPc,(void*)iter,(void*)swAx); STOP_MEASURE("Precomputation phase",start,stop,totalTime); } // Do swQw nakoprovat neutrln prvek iter = (digit_t*)swQw; for (int i = 0;i < NUM_CURVES;++i){ cuda_Memcpy((void*)(iter+0*NB_DIGITS),(void*)neutral->X,MAX_BYTES,hipMemcpyHostToDevice); cuda_Memcpy((void*)(iter+1*NB_DIGITS),(void*)neutral->Y,MAX_BYTES,hipMemcpyHostToDevice); cuda_Memcpy((void*)(iter+2*NB_DIGITS),(void*)neutral->Z,MAX_BYTES,hipMemcpyHostToDevice); cuda_Memcpy((void*)(iter+3*NB_DIGITS),(void*)neutral->T,MAX_BYTES,hipMemcpyHostToDevice); iter += 4*NB_DIGITS; } gpuErrchk(hipDeviceSynchronize()); float pct = totalTime; if (cfg.minus1) { START_MEASURE(start); hipLaunchKernelGGL(( slidingWindowT), dim3(NUM_BLOCKS),dim3(threadsPerBlock), 0, 0, (void*)swQw,(void*)swPc,(void*)swAx,(void*)swCf); STOP_MEASURE("Computation phase",start,stop,totalTime); } else { START_MEASURE(start); hipLaunchKernelGGL(( 
slidingWindowE), dim3(NUM_BLOCKS),dim3(threadsPerBlock), 0, 0, (void*)swQw,(void*)swPc,(void*)swAx,(void*)swCf); STOP_MEASURE("Computation phase",start,stop,totalTime); } printf("--------------------------\n"); printf("Total time: %.3f ms\n",totalTime); cfg.cudaRunTime = totalTime-pct; // Nakoprovat vsledky zptky do pamti potae iter = (digit_t*)swQw; for (int i = 0;i < NUM_CURVES;i++){ cuda_Memcpy((void*)initPoints[i].X,(void*)(iter+0*NB_DIGITS),MAX_BYTES,hipMemcpyDeviceToHost); cuda_Memcpy((void*)initPoints[i].Y,(void*)(iter+1*NB_DIGITS),MAX_BYTES,hipMemcpyDeviceToHost); cuda_Memcpy((void*)initPoints[i].Z,(void*)(iter+2*NB_DIGITS),MAX_BYTES,hipMemcpyDeviceToHost); cuda_Memcpy((void*)initPoints[i].T,(void*)(iter+3*NB_DIGITS),MAX_BYTES,hipMemcpyDeviceToHost); iter += 4*NB_DIGITS; } // Zkontroluj chyby hipError_t cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) fprintf(stderr, "Launch failed: %s\n", hipGetErrorString(cudaStatus)); // Synchronizovat ve cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) fprintf(stderr, "Launch failed: %s\n", hipGetErrorString(cudaStatus)); // Uvolnit pam cuda_Free(swAx); cuda_Free(swQw); cuda_Free(swPc); cuda_Free(swCf); gpuErrchk(hipEventDestroy(start)); gpuErrchk(hipEventDestroy(stop)); cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!\n"); return cudaStatus; } return cudaStatus; }
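// The slidingWindowE/slidingWindowT kernels above walk swCf, the signed-digit NAF expansion
// of the scalar (cfg.nafLen digits), from the most significant digit down, and build()
// reassembles one window of digits into an index into the precomputed point table. The
// project's NAF type itself is not defined in this file, so the host-side sketch below only
// illustrates how such a digit string (values in {-1, 0, +1}, no two adjacent non-zeros) can
// be produced for a small scalar; the function name computeNAF is illustrative, not part of
// the original sources.
#include <vector>

static std::vector<signed char> computeNAF(unsigned long long k) {
    std::vector<signed char> digits;          // least significant digit first
    while (k > 0) {
        signed char d = 0;
        if (k & 1ULL) {                       // odd: choose d = +/-1 so that k - d is divisible by 4
            if ((k & 3ULL) == 1ULL) { d = 1;  k -= 1; }
            else                    { d = -1; k += 1; }
        }
        digits.push_back(d);
        k >>= 1;
    }
    return digits;                            // e.g. k = 7 yields {-1, 0, 0, 1}, i.e. 8 - 1
}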
1000e3d08367cc50ff887224a8f3f53ae6deb18b.cu
#include "kernel.h" #include "edwards.h" #include "twisted.h" // Vrátí výsek z NAF rozvoje __device__ int build(char* bits,unsigned int start,unsigned int end) { int ret = 0; for (unsigned int i = start;i <= end;i++) { ret += bits[i]*(1 << (i-start)); } return ret; } // Výpočet pomocí sliding window pro křivky s a=1 __global__ void slidingWindowE(void* pY,void* pPc,void* swAux,void* swCoeff) { PREPARE(); VOL digit_t* Qd = ((digit_t*)pY)+idx; // Nakopírování pracovních dat pro Y c_x1[threadIdx.x] = *(Qd+threadIdx.x+0*NB_DIGITS); // prvních 32 cifer patří k X c_y1[threadIdx.x] = *(Qd+threadIdx.x+1*NB_DIGITS); // dalších 32 cifer patří k Y c_z1[threadIdx.x] = *(Qd+threadIdx.x+2*NB_DIGITS); // dalších 32 k souřadnici Z c_t1[threadIdx.x] = *(Qd+threadIdx.x+3*NB_DIGITS); // ... a poslední k souřadnici T char* Cf = (char*)swCoeff; for (int i = ax->nafLen-1,u,s = 0;i >= 0;) { if (Cf[i] == 0) { edwardsDbl(); --i; } else { s = i - ax->windowSz + 1; s = s > 0 ? s : 0; while (!Cf[s]) ++s; for (int h = 1;h <= i-s+1;++h) { edwardsDbl(); } u = build(Cf,s,i); if (u > 0){ Qd = ((digit_t*)pPc)+idx+((u-1)/2)*NUM_CURVES*4*NB_DIGITS; edwardsAdd(); } else { Qd = ((digit_t*)pPc)+idx+((-u-1)/2)*NUM_CURVES*4*NB_DIGITS; edwardsSub(); } i = s-1; } } // Nakopírování pracovních dat zpátky do Y Qd = ((digit_t*)pY) + idx; *(Qd+threadIdx.x+0*NB_DIGITS) = c_x1[threadIdx.x]; // prvních 32 cifer patří k X *(Qd+threadIdx.x+1*NB_DIGITS) = c_y1[threadIdx.x]; // dalších 32 cifer patří k Y *(Qd+threadIdx.x+2*NB_DIGITS) = c_z1[threadIdx.x]; // dalších 32 k souřadnici Z *(Qd+threadIdx.x+3*NB_DIGITS) = c_t1[threadIdx.x]; // ... a poslední k souřadnici T __syncthreads(); } // Výpočet pomocí sliding window pro křivky s a=-1 __global__ void slidingWindowT(void* pY,void* pPc,void* swAux,void* swCoeff) { PREPARE(); VOL digit_t* Qd = ((digit_t*)pY)+idx; // Nakopírování pracovních dat pro Y c_x1[threadIdx.x] = *(Qd+threadIdx.x+0*NB_DIGITS); // prvních 32 cifer patří k X c_y1[threadIdx.x] = *(Qd+threadIdx.x+1*NB_DIGITS); // dalších 32 cifer patří k Y c_z1[threadIdx.x] = *(Qd+threadIdx.x+2*NB_DIGITS); // dalších 32 k souřadnici Z c_t1[threadIdx.x] = *(Qd+threadIdx.x+3*NB_DIGITS); // ... a poslední k souřadnici T char* Cf = (char*)swCoeff; for (int i = ax->nafLen-1,u,s = 0;i >= 0;) { if (Cf[i] == 0) { twistedDbl(); --i; } else { s = i - ax->windowSz + 1; s = s > 0 ? s : 0; while (!Cf[s]) ++s; for (int h = 1;h <= i-s+1;++h) { twistedDbl(); } u = build(Cf,s,i); if (u > 0){ Qd = ((digit_t*)pPc)+idx+((u-1)/2)*NUM_CURVES*4*NB_DIGITS; twistedAdd(); } else { Qd = ((digit_t*)pPc)+idx+((-u-1)/2)*NUM_CURVES*4*NB_DIGITS; twistedSub(); } i = s-1; } } // Nakopírování pracovních dat zpátky do Y Qd = ((digit_t*)pY) + idx; *(Qd+threadIdx.x+0*NB_DIGITS) = c_x1[threadIdx.x]; // prvních 32 cifer patří k X *(Qd+threadIdx.x+1*NB_DIGITS) = c_y1[threadIdx.x]; // dalších 32 cifer patří k Y *(Qd+threadIdx.x+2*NB_DIGITS) = c_z1[threadIdx.x]; // dalších 32 k souřadnici Z *(Qd+threadIdx.x+3*NB_DIGITS) = c_t1[threadIdx.x]; // ... a poslední k souřadnici T __syncthreads(); } __global__ void precomputeE(void* pX,void* pCube,void* swAux) { PREPARE(); VOL digit_t* Qd = ((digit_t*)pX) + idx; VOL digit_t* out = ((digit_t*)pCube) + idx; // Nakopírování pracovních dat pro Y c_x1[threadIdx.x] = *(Qd+threadIdx.x+0*NB_DIGITS); // prvních 32 cifer patří k X c_y1[threadIdx.x] = *(Qd+threadIdx.x+1*NB_DIGITS); // dalších 32 cifer patří k Y c_z1[threadIdx.x] = *(Qd+threadIdx.x+2*NB_DIGITS); // dalších 32 k souřadnici Z c_t1[threadIdx.x] = *(Qd+threadIdx.x+3*NB_DIGITS); // ... 
a poslední k souřadnici T edwardsDbl(); for (int i = 1; i < (1 << (ax->windowSz-1));++i) { edwardsAdd(); // Výsledek na své místo *(out+threadIdx.x+0*NB_DIGITS) = c_x1[threadIdx.x]; // prvních 32 cifer patří k X *(out+threadIdx.x+1*NB_DIGITS) = c_y1[threadIdx.x]; // dalších 32 cifer patří k Y *(out+threadIdx.x+2*NB_DIGITS) = c_z1[threadIdx.x]; // dalších 32 k souřadnici Z *(out+threadIdx.x+3*NB_DIGITS) = c_t1[threadIdx.x]; // ... a poslední k souřadnici T out += NUM_CURVES*4*NB_DIGITS; __syncthreads(); edwardsAdd(); } } __global__ void precomputeT(void* pX,void* pCube,void* swAux) { PREPARE(); VOL digit_t* Qd = ((digit_t*)pX) + idx; VOL digit_t* out = ((digit_t*)pCube) + idx; // Nakopírování pracovních dat pro Y c_x1[threadIdx.x] = *(Qd+threadIdx.x+0*NB_DIGITS); // prvních 32 cifer patří k X c_y1[threadIdx.x] = *(Qd+threadIdx.x+1*NB_DIGITS); // dalších 32 cifer patří k Y c_z1[threadIdx.x] = *(Qd+threadIdx.x+2*NB_DIGITS); // dalších 32 k souřadnici Z c_t1[threadIdx.x] = *(Qd+threadIdx.x+3*NB_DIGITS); // ... a poslední k souřadnici T twistedDbl(); for (int i = 1; i < (1 << (ax->windowSz-1));++i) { twistedAdd(); // Výsledek na své místo *(out+threadIdx.x+0*NB_DIGITS) = c_x1[threadIdx.x]; // prvních 32 cifer patří k X *(out+threadIdx.x+1*NB_DIGITS) = c_y1[threadIdx.x]; // dalších 32 cifer patří k Y *(out+threadIdx.x+2*NB_DIGITS) = c_z1[threadIdx.x]; // dalších 32 k souřadnici Z *(out+threadIdx.x+3*NB_DIGITS) = c_t1[threadIdx.x]; // ... a poslední k souřadnici T out += NUM_CURVES*4*NB_DIGITS; __syncthreads(); twistedAdd(); } } cudaError_t computeMixed(const ComputeConfig& cfg,const ExtendedPoint* neutral,ExtendedPoint* initPoints,const NAF& coeff) { const int PRECOMP_SZ = (1 << (cfg.windowSz-1)); // Počet bodů, které je nutné předpočítat const int NUM_CURVES = cfg.numCurves; // Počet načtených křivek int blcks = NUM_CURVES/CURVES_PER_BLOCK; const int NUM_BLOCKS = (blcks == 0 ? 
1 : blcks)/2; // Počet použitých bloků const int USE_DEVICE = cfg.deviceId; // ID zařízení, které bude použito int devs = 0; gpuErrchk(cudaGetDeviceCount(&devs)); if (USE_DEVICE < 0 || USE_DEVICE >= devs) { fprintf(stderr,"Launch failed: invalid device ID.\n"); return cudaErrorInvalidDevice; } cudaEvent_t start,stop; float totalTime = 0; void *swQw = NULL,*swPc = NULL,*swAx = NULL,*swCf = NULL; gpuErrchk(cudaSetDevice(USE_DEVICE)); // Zjištění vlastností zařízení cudaDeviceProp prop; gpuErrchk(cudaGetDeviceProperties(&prop, 0)); // Ověření, že Compute Capability je alespoň 2.0 if (prop.major < 2) { fprintf(stderr,"Launch failed: compute capability of the device must be at least 2.0.\n"); return cudaErrorInitializationError; } // Ověření, že se všechny křivky vejdou do sdílené paměti if ((int)prop.sharedMemPerBlock*prop.multiProcessorCount < NUM_CURVES*CURVE_MEMORY_SIZE) { fprintf(stderr,"Launch failed: cannot fit curves into the shared memory.\n"); return cudaErrorLaunchOutOfResources; } // Vytvořit eventy pro měření času gpuErrchk(cudaEventCreate(&start)); gpuErrchk(cudaEventCreate(&stop)); // Alokace potřebných dat cuda_Malloc((void**)&swPc,NUM_CURVES*PRECOMP_SZ*4*MAX_BYTES); // Předpočítané body cuda_Malloc((void**)&swQw,NUM_CURVES*4*MAX_BYTES); // Pomocný bod cuda_Malloc((void**)&swAx,sizeof(ComputeConfig)); // Pomocná struktura cuda_Malloc((void**)&swCf,cfg.nafLen); // NAF rozvoj koeficientu // Pomocná struktura cuda_Memcpy(swAx,(void*)&cfg,sizeof(ComputeConfig),cudaMemcpyHostToDevice); // NAF rozvoj koeficientu cuda_Memcpy(swCf,(void*)coeff.bits,cfg.nafLen,cudaMemcpyHostToDevice); // Počáteční body VOL digit_t* iter = (digit_t*)swPc; for (int i = 0;i < NUM_CURVES;i++){ cuda_Memcpy((void*)(iter+0*NB_DIGITS),(void*)initPoints[i].X,MAX_BYTES,cudaMemcpyHostToDevice); cuda_Memcpy((void*)(iter+1*NB_DIGITS),(void*)initPoints[i].Y,MAX_BYTES,cudaMemcpyHostToDevice); cuda_Memcpy((void*)(iter+2*NB_DIGITS),(void*)initPoints[i].Z,MAX_BYTES,cudaMemcpyHostToDevice); cuda_Memcpy((void*)(iter+3*NB_DIGITS),(void*)initPoints[i].T,MAX_BYTES,cudaMemcpyHostToDevice); iter += 4*NB_DIGITS; } // Konfigurace kernelů dim3 threadsPerBlock(NB_DIGITS,CURVES_PER_BLOCK); printf("Device name and ID : %s (%d)\n",prop.name,USE_DEVICE); printf("Device compute capability: %d.%d\n",prop.major,prop.minor); printf("Execution configuration: %d x %d x %d\n",NUM_BLOCKS,CURVES_PER_BLOCK,NB_DIGITS); printf("--------------------------\n"); // Vytvoření streamů cudaStream_t edwardsStream,twistedStream; gpuErrchk(cudaStreamCreate(&edwardsStream)); gpuErrchk(cudaStreamCreate(&twistedStream)); // Startovací adresy pro stream s Edwardsovými křivkami void* swPcE = ((digit_t*)swPc)+NUM_CURVES*2*NB_DIGITS; VOL digit_t* iterE = iter+NUM_CURVES*2*NB_DIGITS; // Další předpočítané body START_MEASURE(start); precomputeT<<<NUM_BLOCKS,threadsPerBlock,0,twistedStream>>>((void*)swPc, (void*)iter, (void*)swAx); precomputeE<<<NUM_BLOCKS,threadsPerBlock,0,edwardsStream>>>((void*)swPcE,(void*)iterE,(void*)swAx); STOP_MEASURE("Precomputation phase",start,stop,totalTime); // Do swQw nakopírovat neutrální prvek iter = (digit_t*)swQw; for (int i = 0;i < NUM_CURVES;++i){ cuda_Memcpy((void*)(iter+0*NB_DIGITS),(void*)neutral->X,MAX_BYTES,cudaMemcpyHostToDevice); cuda_Memcpy((void*)(iter+1*NB_DIGITS),(void*)neutral->Y,MAX_BYTES,cudaMemcpyHostToDevice); cuda_Memcpy((void*)(iter+2*NB_DIGITS),(void*)neutral->Z,MAX_BYTES,cudaMemcpyHostToDevice); cuda_Memcpy((void*)(iter+3*NB_DIGITS),(void*)neutral->T,MAX_BYTES,cudaMemcpyHostToDevice); iter += 4*NB_DIGITS; } 
gpuErrchk(cudaDeviceSynchronize()); // Startovací adresy pro stream s Edwardsovými křivkami swPcE = ((digit_t*)swPc)+NUM_CURVES*2*NB_DIGITS; void* swQwE = ((digit_t*)swQw)+NUM_CURVES*2*NB_DIGITS; float pct = totalTime; START_MEASURE(start); slidingWindowT<<<NUM_BLOCKS,threadsPerBlock,0,twistedStream>>>((void*)swQw, (void*)swPc, (void*)swAx,(void*)swCf); slidingWindowE<<<NUM_BLOCKS,threadsPerBlock,0,edwardsStream>>>((void*)swQwE,(void*)swPcE,(void*)swAx,(void*)swCf); STOP_MEASURE("Computation phase",start,stop,totalTime); printf("--------------------------\n"); printf("Total time: %.3f ms\n",totalTime); cfg.cudaRunTime = totalTime-pct; gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaStreamDestroy(twistedStream)); gpuErrchk(cudaStreamDestroy(edwardsStream)); // Nakopírovat výsledky zpátky do paměti počítače iter = (digit_t*)swQw; for (int i = 0;i < NUM_CURVES;i++){ cuda_Memcpy((void*)initPoints[i].X,(void*)(iter+0*NB_DIGITS),MAX_BYTES,cudaMemcpyDeviceToHost); cuda_Memcpy((void*)initPoints[i].Y,(void*)(iter+1*NB_DIGITS),MAX_BYTES,cudaMemcpyDeviceToHost); cuda_Memcpy((void*)initPoints[i].Z,(void*)(iter+2*NB_DIGITS),MAX_BYTES,cudaMemcpyDeviceToHost); cuda_Memcpy((void*)initPoints[i].T,(void*)(iter+3*NB_DIGITS),MAX_BYTES,cudaMemcpyDeviceToHost); iter += 4*NB_DIGITS; } // Zkontroluj chyby cudaError_t cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) fprintf(stderr, "Launch failed: %s\n", cudaGetErrorString(cudaStatus)); // Synchronizovat vše cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) fprintf(stderr, "Launch failed: %s\n", cudaGetErrorString(cudaStatus)); // Uvolnit paměť cuda_Free(swAx); cuda_Free(swQw); cuda_Free(swPc); cuda_Free(swCf); gpuErrchk(cudaEventDestroy(start)); gpuErrchk(cudaEventDestroy(stop)); cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!\n"); return cudaStatus; } return cudaStatus; } cudaError_t computeSingle(const ComputeConfig& cfg,const ExtendedPoint* neutral,ExtendedPoint* initPoints,const NAF& coeff) { const int PRECOMP_SZ = (1 << (cfg.windowSz-1)); // Počet bodů, které je nutné předpočítat const int NUM_CURVES = cfg.numCurves; // Počet načtených křivek int blcks = NUM_CURVES/CURVES_PER_BLOCK; const int NUM_BLOCKS = (blcks == 0 ? 
1 : blcks); // Počet použitých bloků const int USE_DEVICE = cfg.deviceId; // ID zařízení, které bude použito int devs = 0; gpuErrchk(cudaGetDeviceCount(&devs)); if (USE_DEVICE < 0 || USE_DEVICE >= devs) { fprintf(stderr,"Launch failed: invalid device ID.\n"); return cudaErrorInvalidDevice; } cudaEvent_t start,stop; float totalTime = 0; void *swQw = NULL,*swPc = NULL,*swAx = NULL,*swCf = NULL; gpuErrchk(cudaSetDevice(USE_DEVICE)); // Zjištění vlastností zařízení cudaDeviceProp prop; gpuErrchk(cudaGetDeviceProperties(&prop, 0)); // Ověření, že Compute Capability je alespoň 2.0 if (prop.major < 2) { fprintf(stderr,"Launch failed: compute capability of the device must be at least 2.0.\n"); return cudaErrorInitializationError; } // Ověření, že se všechny křivky vejdou do sdílené paměti if ((int)prop.sharedMemPerBlock*prop.multiProcessorCount < NUM_CURVES*CURVE_MEMORY_SIZE) { fprintf(stderr,"Launch failed: cannot fit curves into the shared memory.\n"); return cudaErrorLaunchOutOfResources; } gpuErrchk(cudaEventCreate(&start)); gpuErrchk(cudaEventCreate(&stop)); // Alokace potřebných dat cuda_Malloc((void**)&swPc,NUM_CURVES*PRECOMP_SZ*4*MAX_BYTES); // Předpočítané body cuda_Malloc((void**)&swQw,NUM_CURVES*4*MAX_BYTES); // Pomocný bod cuda_Malloc((void**)&swAx,sizeof(ComputeConfig)); // Pomocná struktura cuda_Malloc((void**)&swCf,cfg.nafLen); // NAF rozvoj koeficientu // Pomocná struktura cuda_Memcpy(swAx,(void*)&cfg,sizeof(ComputeConfig),cudaMemcpyHostToDevice); // NAF rozvoj koeficientu cuda_Memcpy(swCf,(void*)coeff.bits,cfg.nafLen,cudaMemcpyHostToDevice); // Počáteční body VOL digit_t* iter = (digit_t*)swPc; for (int i = 0;i < NUM_CURVES;i++){ cuda_Memcpy((void*)(iter+0*NB_DIGITS),(void*)initPoints[i].X,MAX_BYTES,cudaMemcpyHostToDevice); cuda_Memcpy((void*)(iter+1*NB_DIGITS),(void*)initPoints[i].Y,MAX_BYTES,cudaMemcpyHostToDevice); cuda_Memcpy((void*)(iter+2*NB_DIGITS),(void*)initPoints[i].Z,MAX_BYTES,cudaMemcpyHostToDevice); cuda_Memcpy((void*)(iter+3*NB_DIGITS),(void*)initPoints[i].T,MAX_BYTES,cudaMemcpyHostToDevice); iter += 4*NB_DIGITS; } // Konfigurace kernelů dim3 threadsPerBlock(NB_DIGITS,CURVES_PER_BLOCK); printf("Device name and ID : %s (%d)\n",prop.name,USE_DEVICE); printf("Execution configuration: %d x %d x %d\n",NUM_BLOCKS,CURVES_PER_BLOCK,NB_DIGITS); printf("--------------------------\n"); // Další předpočítané body if (cfg.minus1) { START_MEASURE(start); precomputeT<<<NUM_BLOCKS,threadsPerBlock>>>((void*)swPc,(void*)iter,(void*)swAx); STOP_MEASURE("Precomputation phase",start,stop,totalTime); } else { START_MEASURE(start); precomputeE<<<NUM_BLOCKS,threadsPerBlock>>>((void*)swPc,(void*)iter,(void*)swAx); STOP_MEASURE("Precomputation phase",start,stop,totalTime); } // Do swQw nakopírovat neutrální prvek iter = (digit_t*)swQw; for (int i = 0;i < NUM_CURVES;++i){ cuda_Memcpy((void*)(iter+0*NB_DIGITS),(void*)neutral->X,MAX_BYTES,cudaMemcpyHostToDevice); cuda_Memcpy((void*)(iter+1*NB_DIGITS),(void*)neutral->Y,MAX_BYTES,cudaMemcpyHostToDevice); cuda_Memcpy((void*)(iter+2*NB_DIGITS),(void*)neutral->Z,MAX_BYTES,cudaMemcpyHostToDevice); cuda_Memcpy((void*)(iter+3*NB_DIGITS),(void*)neutral->T,MAX_BYTES,cudaMemcpyHostToDevice); iter += 4*NB_DIGITS; } gpuErrchk(cudaDeviceSynchronize()); float pct = totalTime; if (cfg.minus1) { START_MEASURE(start); slidingWindowT<<<NUM_BLOCKS,threadsPerBlock>>>((void*)swQw,(void*)swPc,(void*)swAx,(void*)swCf); STOP_MEASURE("Computation phase",start,stop,totalTime); } else { START_MEASURE(start); 
slidingWindowE<<<NUM_BLOCKS,threadsPerBlock>>>((void*)swQw,(void*)swPc,(void*)swAx,(void*)swCf); STOP_MEASURE("Computation phase",start,stop,totalTime); } printf("--------------------------\n"); printf("Total time: %.3f ms\n",totalTime); cfg.cudaRunTime = totalTime-pct; // Nakopírovat výsledky zpátky do paměti počítače iter = (digit_t*)swQw; for (int i = 0;i < NUM_CURVES;i++){ cuda_Memcpy((void*)initPoints[i].X,(void*)(iter+0*NB_DIGITS),MAX_BYTES,cudaMemcpyDeviceToHost); cuda_Memcpy((void*)initPoints[i].Y,(void*)(iter+1*NB_DIGITS),MAX_BYTES,cudaMemcpyDeviceToHost); cuda_Memcpy((void*)initPoints[i].Z,(void*)(iter+2*NB_DIGITS),MAX_BYTES,cudaMemcpyDeviceToHost); cuda_Memcpy((void*)initPoints[i].T,(void*)(iter+3*NB_DIGITS),MAX_BYTES,cudaMemcpyDeviceToHost); iter += 4*NB_DIGITS; } // Zkontroluj chyby cudaError_t cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) fprintf(stderr, "Launch failed: %s\n", cudaGetErrorString(cudaStatus)); // Synchronizovat vše cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) fprintf(stderr, "Launch failed: %s\n", cudaGetErrorString(cudaStatus)); // Uvolnit paměť cuda_Free(swAx); cuda_Free(swQw); cuda_Free(swPc); cuda_Free(swCf); gpuErrchk(cudaEventDestroy(start)); gpuErrchk(cudaEventDestroy(stop)); cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!\n"); return cudaStatus; } return cudaStatus; }
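// computeMixed above halves the curve batch and issues the twisted and Edwards kernels on two
// separate streams so the device can overlap them; the host then waits on both with
// cudaDeviceSynchronize before copying the results back. A minimal, self-contained sketch of
// that launch pattern is shown below; workA/workB are placeholder kernels, not part of the
// original sources.
#include <cuda_runtime.h>

__global__ void workA(int *out) { out[threadIdx.x] = 1; }
__global__ void workB(int *out) { out[threadIdx.x] = 2; }

static void launchOnTwoStreams(int *dA, int *dB) {
    cudaStream_t sA, sB;
    cudaStreamCreate(&sA);
    cudaStreamCreate(&sB);
    workA<<<1, 32, 0, sA>>>(dA);   // both launches return immediately on the host
    workB<<<1, 32, 0, sB>>>(dB);   // and may execute concurrently on the device
    cudaDeviceSynchronize();       // wait for both streams, as computeMixed does
    cudaStreamDestroy(sA);
    cudaStreamDestroy(sB);
}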
2acaae0bc13a497924655b0e348a508fd3d8ccd7.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/hip/HIPBlas.h> #include <ATen/hip/HIPContext.h> #include <ATen/native/DilatedConvolutionUtils.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <tuple> #include "ATen/ATen.h" #include "ATen/native/hip/im2col.cuh" #include "ATen/native/hip/vol2col.cuh" namespace at { namespace native { namespace { // hyper-volume to column, CUDA template <typename Dtype, int64_t dim> void hvol2col( hipStream_t stream, const Dtype* data_hvol, const int channels, const IntArrayRef input_size, const IntArrayRef output_size, const IntArrayRef kernel_size, const IntArrayRef stride_size, const IntArrayRef pad_size, const IntArrayRef dilation_size, Dtype* data_col) { if (dim == 3) { vol2col<Dtype>( stream, data_hvol, channels, input_size[0], input_size[1], input_size[2], output_size[0], output_size[1], output_size[2], kernel_size[0], kernel_size[1], kernel_size[2], pad_size[0], pad_size[1], pad_size[2], stride_size[0], stride_size[1], stride_size[2], dilation_size[0], dilation_size[1], dilation_size[2], data_col); } if (dim == 2) { im2col<Dtype>( stream, data_hvol, channels, input_size[0], input_size[1], output_size[0], output_size[1], kernel_size[0], kernel_size[1], pad_size[0], pad_size[1], stride_size[0], stride_size[1], dilation_size[0], dilation_size[1], data_col); } } // column to hyper-volume, CUDA template <typename Dtype, int64_t dim> void col2hvol( hipStream_t stream, const Dtype* data_col, const int channels, const IntArrayRef input_size, const IntArrayRef output_size, const IntArrayRef kernel_size, const IntArrayRef stride_size, const IntArrayRef pad_size, const IntArrayRef dilation_size, Dtype* data_hvol) { if (dim == 3) { col2vol<Dtype, Dtype>( stream, data_col, channels, input_size[0], input_size[1], input_size[2], output_size[0], output_size[1], output_size[2], kernel_size[0], kernel_size[1], kernel_size[2], pad_size[0], pad_size[1], pad_size[2], stride_size[0], stride_size[1], stride_size[2], dilation_size[0], dilation_size[1], dilation_size[2], data_hvol); } if (dim == 2) { col2im<Dtype, Dtype>( stream, data_col, channels, input_size[0], input_size[1], output_size[0], output_size[1], kernel_size[0], kernel_size[1], pad_size[0], pad_size[1], stride_size[0], stride_size[1], dilation_size[0], dilation_size[1], data_hvol); } } /* check tensor data locations */ void slow_conv_dilated_location_check( const Tensor& input, const Tensor& weight, const Tensor& bias, const Tensor& grad_output) { // checking data locations of user-provided tensor arguments TensorArg input_arg{input, "input", 2}, weight_arg{weight, "weight", 3}, bias_arg{bias, "bias", 4}, grad_output_arg{grad_output, "grad_output", 5}; checkAllSameGPU("slow_conv_dilated_all_cuda_template", {input_arg, weight_arg}); if (bias.defined()) { checkAllSameGPU("slow_conv_dilated_all_cuda_template", {input_arg, bias_arg}); } if (grad_output.defined()) { checkAllSameGPU( "slow_conv_dilated_all_cuda_template", {input_arg, grad_output_arg}); } // we are not checking the data locations of other tensor // arguments such as output, grad_input, etc because of these are // allocated based on input options and hence these tensors always // have the same data location as of input tensor. } /* slow_conv_dilated_all_cuda_template Main worker. Computes tensors output, grad_input, grad_weight, and/or grad_bias if defined, respectively. 
*/ template <int64_t dim> void slow_conv_dilated_all_cuda_template( Tensor& output, const Tensor& input, const Tensor& weight, const Tensor& bias, const Tensor& grad_output, Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias, IntArrayRef kernel_size, IntArrayRef stride_size, IntArrayRef pad_size, IntArrayRef dilation_size) { slow_conv_dilated_location_check(input, weight, bias, grad_output); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto options = input.options(); // The rear part of input tensor sizes: auto input_size = input.sizes().slice(2); // The rear part of output tensor sizes: auto output_size = internal::get_output_size<dim>( input, kernel_size, stride_size, pad_size, dilation_size); int64_t batchSize = input.size(0); int64_t nInputPlane = weight.size(1); int64_t nOutputPlane = weight.size(0); // Temporary buffers: int64_t m = std::accumulate( kernel_size.begin(), kernel_size.end(), 1, std::multiplies<int64_t>()); int64_t output_vsize = std::accumulate( output_size.begin(), output_size.end(), 1, std::multiplies<int64_t>()); Tensor columns = at::empty({0}, options); if (output.defined() || grad_weight.defined() || grad_input.defined()) { columns.resize_({nInputPlane * m, output_vsize}); } // Initialize if (grad_weight.defined()) { grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.zero_(); } if (output.defined() && !bias.defined()) { output.zero_(); } #ifdef __HIP_PLATFORM_HCC__ /* When using ROCm, the sum evaluation is inaccurate for double tensors. The reason is currently unknown. Hence, we use gemv for computing `grad_output_n.sum(dims)` until the ROCm-sum issue is resolved. */ Tensor ones = at::empty({0}, options); if (grad_bias.defined()) { ones.resize_({output_vsize}); ones.fill_(1); } /* MSVC does not like #ifdef-s inside the CPP macro AT_DISPATCH_FLOATING_TYPES_AND_HALF. 
So, we define the code branching outside the CPP macro: */ #define CALCULATE_GRAD_BIAS \ at::cuda::blas::gemv<scalar_t>( \ /*trans=*/'t', \ /* m=*/output_vsize, \ /* n=*/nOutputPlane, \ /*alpha=*/ScalarConvert<int, scalar_t>::to(1), \ /* A=*/grad_output_n.data_ptr<scalar_t>(), \ /* lda=*/output_vsize, \ /* x=*/ones.data_ptr<scalar_t>(), \ /* incx=*/1, \ /* beta=*/ScalarConvert<int, scalar_t>::to(1), \ /* y=*/grad_bias.data_ptr<scalar_t>(), \ /* incy=*/1) #else #define CALCULATE_GRAD_BIAS grad_bias += grad_output_n.sum(dims) #endif // Helpers Tensor grad_output_n; std::vector<int64_t> dims(dim); std::iota(dims.begin(), dims.end(), 1); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_dilated<>", [&] { // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt++) { // Matrix multiply per output: Tensor input_n = input.select(0, elt); // Output if (output.defined()) { Tensor output_n = output.select(0, elt); if (bias.defined()) { /* For gemm argument derivation, see slow_conv_dilated_all_cuda_template in ATen/native/DilatedConvolution.cpp */ for (int n = 0; n < nOutputPlane; n++) { output_n.select(0, n).fill_(bias[n]); } } // Extract columns: hvol2col<scalar_t, dim>( stream, input_n.data_ptr<scalar_t>(), nInputPlane, input_size, output_size, kernel_size, stride_size, pad_size, dilation_size, columns.data_ptr<scalar_t>()); /* For gemm argument derivation, see slow_conv_dilated_all_cuda_template in ATen/native/DilatedConvolution.cpp */ at::cuda::blas::gemm<scalar_t>( /*transa=*/'n', /*transb=*/'n', /* m=*/columns.size(1), /* n=*/nOutputPlane, /* k=*/columns.size(0), /* alpha=*/ScalarConvert<int, scalar_t>::to(1), /* A=*/columns.data_ptr<scalar_t>(), /* lda=*/columns.size(1), /* B=*/weight.data_ptr<scalar_t>(), /* ldb=*/columns.size(0), /* beta=*/ScalarConvert<int, scalar_t>::to(1), /* C=*/output_n.data_ptr<scalar_t>(), /* ldc=*/columns.size(1)); } else { // All gradients grad_output_n = grad_output.select(0, elt); } // Gradient of input: if (grad_input.defined()) { /* For gemm argument derivation, see slow_conv_dilated_all_cuda_template in ATen/native/DilatedConvolution.cpp */ at::cuda::blas::gemm<scalar_t>( /*transa=*/'n', /*transb=*/'t', /* m=*/columns.size(1), /* n=*/columns.size(0), /* k=*/nOutputPlane, /* alpha=*/ScalarConvert<int, scalar_t>::to(1), /* A=*/grad_output_n.data_ptr<scalar_t>(), /* lda=*/columns.size(1), /* B=*/weight.data_ptr<scalar_t>(), /* ldb=*/columns.size(0), /* beta=*/ScalarConvert<int, scalar_t>::to(0), /* C=*/columns.data_ptr<scalar_t>(), /* ldc=*/columns.size(1)); // Unpack columns back into input: Tensor grad_input_n = grad_input.select(0, elt); col2hvol<scalar_t, dim>( stream, columns.data_ptr<scalar_t>(), nInputPlane, input_size, output_size, kernel_size, stride_size, pad_size, dilation_size, grad_input_n.data_ptr<scalar_t>()); } // Gradient of weight: if (grad_weight.defined()) { // Extract columns: hvol2col<scalar_t, dim>( stream, input_n.data_ptr<scalar_t>(), nInputPlane, input_size, output_size, kernel_size, stride_size, pad_size, dilation_size, columns.data_ptr<scalar_t>()); scalar_t scale = ScalarConvert<int, scalar_t>::to( 1); // TODO: expose as argument? 
/* For gemm argument derivation, see slow_conv_dilated_all_cuda_template in ATen/native/DilatedConvolution.cpp */ at::cuda::blas::gemm<scalar_t>( /*transa=*/'t', /*transb=*/'n', /* m=*/columns.size(0), /* n=*/nOutputPlane, /* k=*/columns.size(1), /* alpha=*/scale, /* A=*/columns.data_ptr<scalar_t>(), /* lda=*/columns.size(1), /* B=*/grad_output_n.data_ptr<scalar_t>(), /* ldb=*/columns.size(1), /* beta=*/ScalarConvert<int, scalar_t>::to(1), /* C=*/grad_weight.data_ptr<scalar_t>(), /* ldc=*/columns.size(0)); } // Gradient of bias: if (grad_bias.defined()) { /* For gemv argument derivation, see slow_conv_dilated_all_cpu_template in ATen/native/DilatedConvolution.cpp */ CALCULATE_GRAD_BIAS; /* MSVC does not like #ifdef-s inside the CPP macros, see above. */ /* TODO: when scale != 1 is introduced then use: grad_bias += scale * grad_output_n.sum(dims); */ } } }); } // slow_conv_dilated_all_cuda_template } // namespace Tensor slow_conv_dilated2d_cuda( const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride_size, IntArrayRef pad_size, IntArrayRef dilation_size) { Tensor undefined; internal::slow_conv_dilated_shape_check<2>( input, weight, bias, undefined, kernel_size, stride_size, pad_size, dilation_size); auto is_batch = input.dim() == 4; auto options = input.options(); // calculate output tensor size auto output_size = internal::get_output_size<2>( input, weight, kernel_size, stride_size, pad_size, dilation_size); // template function assumes batched tensors. unsqueeze(0) will // insert batch dimension without affecting the original tensor. const Tensor input_ = (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); const Tensor weight_ = weight.contiguous(); const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined); Tensor output = at::empty(output_size, options); Tensor output_ = (is_batch ? output : output.unsqueeze(0)); slow_conv_dilated_all_cuda_template<2>( output_, input_, weight_, bias_, undefined, undefined, undefined, undefined, kernel_size, stride_size, pad_size, dilation_size); return output; } std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated2d_backward_cuda( const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride_size, IntArrayRef pad_size, IntArrayRef dilation_size, const std::array<bool, 3ul> output_mask) { Tensor undefined; internal::slow_conv_dilated_shape_check<2>( input, weight, undefined, grad_output, kernel_size, stride_size, pad_size, dilation_size); auto is_batch = input.dim() == 4; auto options = grad_output.options(); // template function assumes batched tensors. unsqueeze(0) will // insert batch dimension without affecting the original tensor. const Tensor grad_output_ = (is_batch ? grad_output.contiguous() : grad_output.contiguous().unsqueeze(0)); const Tensor input_ = (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); const Tensor weight_ = weight.contiguous(); // compute only gradients for which the corresponding output_mask is true: Tensor grad_input = (output_mask[0] ? at::empty(input.sizes(), options) : undefined); Tensor grad_weight = (output_mask[1] ? at::empty(weight.sizes(), options) : undefined); Tensor grad_bias = (output_mask[2] ? at::empty(weight.size(0), options) : undefined); Tensor grad_input_ = (output_mask[0] ? (is_batch ? 
grad_input : grad_input.unsqueeze(0)) : undefined); slow_conv_dilated_all_cuda_template<2>( undefined, input_, weight_, undefined, grad_output_, grad_input, grad_weight, grad_bias, kernel_size, stride_size, pad_size, dilation_size); return std::tie(grad_input, grad_weight, grad_bias); } Tensor slow_conv_dilated3d_cuda( const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride_size, IntArrayRef pad_size, IntArrayRef dilation_size) { Tensor undefined; internal::slow_conv_dilated_shape_check<3>( input, weight, bias, undefined, kernel_size, stride_size, pad_size, dilation_size); auto is_batch = input.dim() == 5; auto options = input.options(); // calculate output tensor size auto output_size = internal::get_output_size<3>( input, weight, kernel_size, stride_size, pad_size, dilation_size); // template function assumes batched tensors. unsqueeze(0) will // insert batch dimension without affecting the original tensor. const Tensor input_ = (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); const Tensor weight_ = weight.contiguous(); const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined); Tensor output = at::empty(output_size, options); Tensor output_ = (is_batch ? output : output.unsqueeze(0)); slow_conv_dilated_all_cuda_template<3>( output, input_, weight_, bias_, undefined, undefined, undefined, undefined, kernel_size, stride_size, pad_size, dilation_size); return output; } std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated3d_backward_cuda( const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride_size, IntArrayRef pad_size, IntArrayRef dilation_size, const std::array<bool, 3ul> output_mask) { Tensor undefined; internal::slow_conv_dilated_shape_check<3>( input, weight, undefined, grad_output, kernel_size, stride_size, pad_size, dilation_size); auto is_batch = input.dim() == 5; auto options = grad_output.options(); // template function assumes batched tensors. unsqueeze(0) will // insert batch dimension without affecting the original tensor. const Tensor grad_output_ = (is_batch ? grad_output.contiguous() : grad_output.contiguous().unsqueeze(0)); const Tensor input_ = (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); const Tensor weight_ = weight.contiguous(); // compute only gradients for which the corresponding output_mask is true: Tensor grad_input = (output_mask[0] ? at::empty(input.sizes(), options) : undefined); Tensor grad_weight = (output_mask[1] ? at::empty(weight.sizes(), options) : undefined); Tensor grad_bias = (output_mask[2] ? at::empty(weight.size(0), options) : undefined); Tensor grad_input_ = (output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0)) : undefined); slow_conv_dilated_all_cuda_template<3>( undefined, input_, weight_, undefined, grad_output_, grad_input, grad_weight, grad_bias, kernel_size, stride_size, pad_size, dilation_size); return std::tie(grad_input, grad_weight, grad_bias); } } // namespace native } // namespace at
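// slow_conv_dilated_all_cuda_template above lowers the dilated convolution to a matrix
// product: hvol2col unrolls each input window into a column (im2col for dim==2, vol2col for
// dim==3) and a BLAS gemm call multiplies the result with the weight matrix. The per-dimension
// output size it relies on follows the usual dilated-convolution formula,
// out = (in + 2*pad - dilation*(kernel - 1) - 1)/stride + 1. The host-side sketch below shows
// that formula and a single-channel 2D im2col for reference only; it is not the ATen
// implementation, and the helper names convOutSize/im2colRef are hypothetical.
#include <vector>

static long convOutSize(long in, long kernel, long stride, long pad, long dilation) {
    return (in + 2 * pad - dilation * (kernel - 1) - 1) / stride + 1;
}

// Unroll an H x W single-channel image into a (kh*kw) x (outH*outW) column matrix.
static std::vector<float> im2colRef(const std::vector<float> &img, long H, long W,
                                    long kh, long kw, long stride, long pad, long dilation) {
    long outH = convOutSize(H, kh, stride, pad, dilation);
    long outW = convOutSize(W, kw, stride, pad, dilation);
    std::vector<float> col((size_t)(kh * kw) * outH * outW, 0.0f);
    for (long i = 0; i < kh; ++i)
      for (long j = 0; j < kw; ++j)
        for (long y = 0; y < outH; ++y)
          for (long x = 0; x < outW; ++x) {
            long iy = y * stride - pad + i * dilation;   // input row sampled by this kernel tap
            long ix = x * stride - pad + j * dilation;   // input column sampled by this kernel tap
            float v = (iy >= 0 && iy < H && ix >= 0 && ix < W) ? img[(size_t)(iy * W + ix)] : 0.0f;
            col[(size_t)((i * kw + j) * outH + y) * outW + x] = v;
          }
    return col;   // the convolution output is then weight (1 x kh*kw) times this matrix
}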
2acaae0bc13a497924655b0e348a508fd3d8ccd7.cu
#include <ATen/cuda/CUDABlas.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/native/DilatedConvolutionUtils.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <tuple> #include "ATen/ATen.h" #include "ATen/native/cuda/im2col.cuh" #include "ATen/native/cuda/vol2col.cuh" namespace at { namespace native { namespace { // hyper-volume to column, CUDA template <typename Dtype, int64_t dim> void hvol2col( cudaStream_t stream, const Dtype* data_hvol, const int channels, const IntArrayRef input_size, const IntArrayRef output_size, const IntArrayRef kernel_size, const IntArrayRef stride_size, const IntArrayRef pad_size, const IntArrayRef dilation_size, Dtype* data_col) { if (dim == 3) { vol2col<Dtype>( stream, data_hvol, channels, input_size[0], input_size[1], input_size[2], output_size[0], output_size[1], output_size[2], kernel_size[0], kernel_size[1], kernel_size[2], pad_size[0], pad_size[1], pad_size[2], stride_size[0], stride_size[1], stride_size[2], dilation_size[0], dilation_size[1], dilation_size[2], data_col); } if (dim == 2) { im2col<Dtype>( stream, data_hvol, channels, input_size[0], input_size[1], output_size[0], output_size[1], kernel_size[0], kernel_size[1], pad_size[0], pad_size[1], stride_size[0], stride_size[1], dilation_size[0], dilation_size[1], data_col); } } // column to hyper-volume, CUDA template <typename Dtype, int64_t dim> void col2hvol( cudaStream_t stream, const Dtype* data_col, const int channels, const IntArrayRef input_size, const IntArrayRef output_size, const IntArrayRef kernel_size, const IntArrayRef stride_size, const IntArrayRef pad_size, const IntArrayRef dilation_size, Dtype* data_hvol) { if (dim == 3) { col2vol<Dtype, Dtype>( stream, data_col, channels, input_size[0], input_size[1], input_size[2], output_size[0], output_size[1], output_size[2], kernel_size[0], kernel_size[1], kernel_size[2], pad_size[0], pad_size[1], pad_size[2], stride_size[0], stride_size[1], stride_size[2], dilation_size[0], dilation_size[1], dilation_size[2], data_hvol); } if (dim == 2) { col2im<Dtype, Dtype>( stream, data_col, channels, input_size[0], input_size[1], output_size[0], output_size[1], kernel_size[0], kernel_size[1], pad_size[0], pad_size[1], stride_size[0], stride_size[1], dilation_size[0], dilation_size[1], data_hvol); } } /* check tensor data locations */ void slow_conv_dilated_location_check( const Tensor& input, const Tensor& weight, const Tensor& bias, const Tensor& grad_output) { // checking data locations of user-provided tensor arguments TensorArg input_arg{input, "input", 2}, weight_arg{weight, "weight", 3}, bias_arg{bias, "bias", 4}, grad_output_arg{grad_output, "grad_output", 5}; checkAllSameGPU("slow_conv_dilated_all_cuda_template", {input_arg, weight_arg}); if (bias.defined()) { checkAllSameGPU("slow_conv_dilated_all_cuda_template", {input_arg, bias_arg}); } if (grad_output.defined()) { checkAllSameGPU( "slow_conv_dilated_all_cuda_template", {input_arg, grad_output_arg}); } // we are not checking the data locations of other tensor // arguments such as output, grad_input, etc because of these are // allocated based on input options and hence these tensors always // have the same data location as of input tensor. } /* slow_conv_dilated_all_cuda_template Main worker. Computes tensors output, grad_input, grad_weight, and/or grad_bias if defined, respectively. 
*/ template <int64_t dim> void slow_conv_dilated_all_cuda_template( Tensor& output, const Tensor& input, const Tensor& weight, const Tensor& bias, const Tensor& grad_output, Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias, IntArrayRef kernel_size, IntArrayRef stride_size, IntArrayRef pad_size, IntArrayRef dilation_size) { slow_conv_dilated_location_check(input, weight, bias, grad_output); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto options = input.options(); // The rear part of input tensor sizes: auto input_size = input.sizes().slice(2); // The rear part of output tensor sizes: auto output_size = internal::get_output_size<dim>( input, kernel_size, stride_size, pad_size, dilation_size); int64_t batchSize = input.size(0); int64_t nInputPlane = weight.size(1); int64_t nOutputPlane = weight.size(0); // Temporary buffers: int64_t m = std::accumulate( kernel_size.begin(), kernel_size.end(), 1, std::multiplies<int64_t>()); int64_t output_vsize = std::accumulate( output_size.begin(), output_size.end(), 1, std::multiplies<int64_t>()); Tensor columns = at::empty({0}, options); if (output.defined() || grad_weight.defined() || grad_input.defined()) { columns.resize_({nInputPlane * m, output_vsize}); } // Initialize if (grad_weight.defined()) { grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.zero_(); } if (output.defined() && !bias.defined()) { output.zero_(); } #ifdef __HIP_PLATFORM_HCC__ /* When using ROCm, the sum evaluation is inaccurate for double tensors. The reason is currently unknown. Hence, we use gemv for computing `grad_output_n.sum(dims)` until the ROCm-sum issue is resolved. */ Tensor ones = at::empty({0}, options); if (grad_bias.defined()) { ones.resize_({output_vsize}); ones.fill_(1); } /* MSVC does not like #ifdef-s inside the CPP macro AT_DISPATCH_FLOATING_TYPES_AND_HALF. 
So, we define the code branching outside the CPP macro: */ #define CALCULATE_GRAD_BIAS \ at::cuda::blas::gemv<scalar_t>( \ /*trans=*/'t', \ /* m=*/output_vsize, \ /* n=*/nOutputPlane, \ /*alpha=*/ScalarConvert<int, scalar_t>::to(1), \ /* A=*/grad_output_n.data_ptr<scalar_t>(), \ /* lda=*/output_vsize, \ /* x=*/ones.data_ptr<scalar_t>(), \ /* incx=*/1, \ /* beta=*/ScalarConvert<int, scalar_t>::to(1), \ /* y=*/grad_bias.data_ptr<scalar_t>(), \ /* incy=*/1) #else #define CALCULATE_GRAD_BIAS grad_bias += grad_output_n.sum(dims) #endif // Helpers Tensor grad_output_n; std::vector<int64_t> dims(dim); std::iota(dims.begin(), dims.end(), 1); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_dilated<>", [&] { // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt++) { // Matrix multiply per output: Tensor input_n = input.select(0, elt); // Output if (output.defined()) { Tensor output_n = output.select(0, elt); if (bias.defined()) { /* For gemm argument derivation, see slow_conv_dilated_all_cuda_template in ATen/native/DilatedConvolution.cpp */ for (int n = 0; n < nOutputPlane; n++) { output_n.select(0, n).fill_(bias[n]); } } // Extract columns: hvol2col<scalar_t, dim>( stream, input_n.data_ptr<scalar_t>(), nInputPlane, input_size, output_size, kernel_size, stride_size, pad_size, dilation_size, columns.data_ptr<scalar_t>()); /* For gemm argument derivation, see slow_conv_dilated_all_cuda_template in ATen/native/DilatedConvolution.cpp */ at::cuda::blas::gemm<scalar_t>( /*transa=*/'n', /*transb=*/'n', /* m=*/columns.size(1), /* n=*/nOutputPlane, /* k=*/columns.size(0), /* alpha=*/ScalarConvert<int, scalar_t>::to(1), /* A=*/columns.data_ptr<scalar_t>(), /* lda=*/columns.size(1), /* B=*/weight.data_ptr<scalar_t>(), /* ldb=*/columns.size(0), /* beta=*/ScalarConvert<int, scalar_t>::to(1), /* C=*/output_n.data_ptr<scalar_t>(), /* ldc=*/columns.size(1)); } else { // All gradients grad_output_n = grad_output.select(0, elt); } // Gradient of input: if (grad_input.defined()) { /* For gemm argument derivation, see slow_conv_dilated_all_cuda_template in ATen/native/DilatedConvolution.cpp */ at::cuda::blas::gemm<scalar_t>( /*transa=*/'n', /*transb=*/'t', /* m=*/columns.size(1), /* n=*/columns.size(0), /* k=*/nOutputPlane, /* alpha=*/ScalarConvert<int, scalar_t>::to(1), /* A=*/grad_output_n.data_ptr<scalar_t>(), /* lda=*/columns.size(1), /* B=*/weight.data_ptr<scalar_t>(), /* ldb=*/columns.size(0), /* beta=*/ScalarConvert<int, scalar_t>::to(0), /* C=*/columns.data_ptr<scalar_t>(), /* ldc=*/columns.size(1)); // Unpack columns back into input: Tensor grad_input_n = grad_input.select(0, elt); col2hvol<scalar_t, dim>( stream, columns.data_ptr<scalar_t>(), nInputPlane, input_size, output_size, kernel_size, stride_size, pad_size, dilation_size, grad_input_n.data_ptr<scalar_t>()); } // Gradient of weight: if (grad_weight.defined()) { // Extract columns: hvol2col<scalar_t, dim>( stream, input_n.data_ptr<scalar_t>(), nInputPlane, input_size, output_size, kernel_size, stride_size, pad_size, dilation_size, columns.data_ptr<scalar_t>()); scalar_t scale = ScalarConvert<int, scalar_t>::to( 1); // TODO: expose as argument? 
/* For gemm argument derivation, see slow_conv_dilated_all_cuda_template in ATen/native/DilatedConvolution.cpp */ at::cuda::blas::gemm<scalar_t>( /*transa=*/'t', /*transb=*/'n', /* m=*/columns.size(0), /* n=*/nOutputPlane, /* k=*/columns.size(1), /* alpha=*/scale, /* A=*/columns.data_ptr<scalar_t>(), /* lda=*/columns.size(1), /* B=*/grad_output_n.data_ptr<scalar_t>(), /* ldb=*/columns.size(1), /* beta=*/ScalarConvert<int, scalar_t>::to(1), /* C=*/grad_weight.data_ptr<scalar_t>(), /* ldc=*/columns.size(0)); } // Gradient of bias: if (grad_bias.defined()) { /* For gemv argument derivation, see slow_conv_dilated_all_cpu_template in ATen/native/DilatedConvolution.cpp */ CALCULATE_GRAD_BIAS; /* MSVC does not like #ifdef-s inside the CPP macros, see above. */ /* TODO: when scale != 1 is introduced then use: grad_bias += scale * grad_output_n.sum(dims); */ } } }); } // slow_conv_dilated_all_cuda_template } // namespace Tensor slow_conv_dilated2d_cuda( const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride_size, IntArrayRef pad_size, IntArrayRef dilation_size) { Tensor undefined; internal::slow_conv_dilated_shape_check<2>( input, weight, bias, undefined, kernel_size, stride_size, pad_size, dilation_size); auto is_batch = input.dim() == 4; auto options = input.options(); // calculate output tensor size auto output_size = internal::get_output_size<2>( input, weight, kernel_size, stride_size, pad_size, dilation_size); // template function assumes batched tensors. unsqueeze(0) will // insert batch dimension without affecting the original tensor. const Tensor input_ = (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); const Tensor weight_ = weight.contiguous(); const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined); Tensor output = at::empty(output_size, options); Tensor output_ = (is_batch ? output : output.unsqueeze(0)); slow_conv_dilated_all_cuda_template<2>( output_, input_, weight_, bias_, undefined, undefined, undefined, undefined, kernel_size, stride_size, pad_size, dilation_size); return output; } std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated2d_backward_cuda( const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride_size, IntArrayRef pad_size, IntArrayRef dilation_size, const std::array<bool, 3ul> output_mask) { Tensor undefined; internal::slow_conv_dilated_shape_check<2>( input, weight, undefined, grad_output, kernel_size, stride_size, pad_size, dilation_size); auto is_batch = input.dim() == 4; auto options = grad_output.options(); // template function assumes batched tensors. unsqueeze(0) will // insert batch dimension without affecting the original tensor. const Tensor grad_output_ = (is_batch ? grad_output.contiguous() : grad_output.contiguous().unsqueeze(0)); const Tensor input_ = (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); const Tensor weight_ = weight.contiguous(); // compute only gradients for which the corresponding output_mask is true: Tensor grad_input = (output_mask[0] ? at::empty(input.sizes(), options) : undefined); Tensor grad_weight = (output_mask[1] ? at::empty(weight.sizes(), options) : undefined); Tensor grad_bias = (output_mask[2] ? at::empty(weight.size(0), options) : undefined); Tensor grad_input_ = (output_mask[0] ? (is_batch ? 
grad_input : grad_input.unsqueeze(0)) : undefined); slow_conv_dilated_all_cuda_template<2>( undefined, input_, weight_, undefined, grad_output_, grad_input, grad_weight, grad_bias, kernel_size, stride_size, pad_size, dilation_size); return std::tie(grad_input, grad_weight, grad_bias); } Tensor slow_conv_dilated3d_cuda( const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride_size, IntArrayRef pad_size, IntArrayRef dilation_size) { Tensor undefined; internal::slow_conv_dilated_shape_check<3>( input, weight, bias, undefined, kernel_size, stride_size, pad_size, dilation_size); auto is_batch = input.dim() == 5; auto options = input.options(); // calculate output tensor size auto output_size = internal::get_output_size<3>( input, weight, kernel_size, stride_size, pad_size, dilation_size); // template function assumes batched tensors. unsqueeze(0) will // insert batch dimension without affecting the original tensor. const Tensor input_ = (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); const Tensor weight_ = weight.contiguous(); const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined); Tensor output = at::empty(output_size, options); Tensor output_ = (is_batch ? output : output.unsqueeze(0)); slow_conv_dilated_all_cuda_template<3>( output, input_, weight_, bias_, undefined, undefined, undefined, undefined, kernel_size, stride_size, pad_size, dilation_size); return output; } std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated3d_backward_cuda( const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride_size, IntArrayRef pad_size, IntArrayRef dilation_size, const std::array<bool, 3ul> output_mask) { Tensor undefined; internal::slow_conv_dilated_shape_check<3>( input, weight, undefined, grad_output, kernel_size, stride_size, pad_size, dilation_size); auto is_batch = input.dim() == 5; auto options = grad_output.options(); // template function assumes batched tensors. unsqueeze(0) will // insert batch dimension without affecting the original tensor. const Tensor grad_output_ = (is_batch ? grad_output.contiguous() : grad_output.contiguous().unsqueeze(0)); const Tensor input_ = (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); const Tensor weight_ = weight.contiguous(); // compute only gradients for which the corresponding output_mask is true: Tensor grad_input = (output_mask[0] ? at::empty(input.sizes(), options) : undefined); Tensor grad_weight = (output_mask[1] ? at::empty(weight.sizes(), options) : undefined); Tensor grad_bias = (output_mask[2] ? at::empty(weight.size(0), options) : undefined); Tensor grad_input_ = (output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0)) : undefined); slow_conv_dilated_all_cuda_template<3>( undefined, input_, weight_, undefined, grad_output_, grad_input, grad_weight, grad_bias, kernel_size, stride_size, pad_size, dilation_size); return std::tie(grad_input, grad_weight, grad_bias); } } // namespace native } // namespace at
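/* Sketch (not from the file above; names are illustrative only): the row-major /
   column-major convention behind the gemm argument derivation referenced in the
   comments. The BLAS wrapper follows column-major conventions while the ATen
   tensors here are row-major, so a row-major product C = A * B is issued as the
   column-major product C^T = B^T * A^T, i.e. with the operands swapped and no
   explicit transposes -- the same argument order used for output_n = weight *
   columns in the forward pass. A minimal standalone version with cuBLAS: */
#include <cublas_v2.h>

// C (M x N), A (M x K), B (K x N), all row-major and already resident on the device.
void rowmajor_gemm(cublasHandle_t handle, int M, int N, int K,
                   const float* A, const float* B, float* C) {
    const float alpha = 1.0f, beta = 0.0f;
    // Column-major view: C^T (N x M) = B^T (N x K) * A^T (K x M).
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                /* m=*/N, /* n=*/M, /* k=*/K,
                &alpha,
                /* A=*/B, /* lda=*/N,
                /* B=*/A, /* ldb=*/K,
                &beta,
                /* C=*/C, /* ldc=*/N);
}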
8b2d7957b56ddca39aba1c3e284223005b3d44fc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <random>
#include <cutf/memory.hpp>
#include <cutf/experimental/mantissa.hpp>
#include <cutf/type.hpp>
#include <cutf/debug/matrix.hpp>

constexpr unsigned warp_size = 32;

// Three variants of a 16x16x16 matrix multiply-accumulate used to compare rounding error:
// mantissa cut to 10 bits, round trip through half, and a full-fp32 baseline.
__global__ void m16n16k16_cut(float* const c_ptr, const float* const a_ptr, const float* const b_ptr) {
    constexpr unsigned N = 16;
    const unsigned lane_id = threadIdx.x & 0x1f;
    const auto m = lane_id & 0xf;
    const auto n_offset = lane_id / N;
    for (unsigned i = 0; i < N; i += warp_size / N) {
        const auto n = i + n_offset;
        float sum = 0.0f;
        for (unsigned k = 0; k < N; k++) {
            sum += cutf::experimental::mantissa::cut_mantissa<10, cutf::rounding::rr>(a_ptr[m + k * N])
                 * cutf::experimental::mantissa::cut_mantissa<10, cutf::rounding::rr>(b_ptr[k + n * N]);
        }
        c_ptr[m + n * N] += sum;
    }
}

__global__ void m16n16k16_half(float* const c_ptr, const float* const a_ptr, const float* const b_ptr) {
    constexpr unsigned N = 16;
    const unsigned lane_id = threadIdx.x & 0x1f;
    const auto m = lane_id & 0xf;
    const auto n_offset = lane_id / N;
    for (unsigned i = 0; i < N; i += warp_size / N) {
        const auto n = i + n_offset;
        float sum = 0.0f;
        for (unsigned k = 0; k < N; k++) {
            sum += cutf::type::cast<float>(cutf::type::cast<half>(a_ptr[m + k * N]))
                 * cutf::type::cast<float>(cutf::type::cast<half>(b_ptr[k + n * N]));
        }
        c_ptr[m + n * N] += sum;
    }
}

__global__ void m16n16k16_base(float* const c_ptr, const float* const a_ptr, const float* const b_ptr) {
    constexpr unsigned N = 16;
    const unsigned lane_id = threadIdx.x & 0x1f;
    const auto m = lane_id & 0xf;
    const auto n_offset = lane_id / N;
    for (unsigned i = 0; i < N; i += warp_size / N) {
        const auto n = i + n_offset;
        float sum = 0.0f;
        for (unsigned k = 0; k < N; k++) {
            sum += a_ptr[m + k * N] * b_ptr[k + n * N];
        }
        c_ptr[m + n * N] += sum;
    }
}

double get_max_error(const float* const base_ptr, const float* const cut_ptr, const unsigned m, const unsigned n) {
    double max_error = 0.0;
    for (unsigned i = 0; i < m; i++) {
        for (unsigned j = 0; j < n; j++) {
            max_error = std::max(std::abs(static_cast<double>(base_ptr[i * n + j]) - cut_ptr[i * n + j]), max_error);
        }
    }
    return max_error;
}

int main() {
    constexpr unsigned N = 16;
    auto A = cutf::memory::get_host_unique_ptr<float>(N * N);
    auto B = cutf::memory::get_host_unique_ptr<float>(N * N);
    auto C_cut = cutf::memory::get_host_unique_ptr<float>(N * N);
    auto C_half = cutf::memory::get_host_unique_ptr<float>(N * N);
    auto C_base = cutf::memory::get_host_unique_ptr<float>(N * N);

    std::mt19937 mt(std::random_device{}());
    float max_range = 1.0f;
    std::uniform_real_distribution<float> dist(-max_range, max_range);
    for (unsigned i = 0; i < N * N; i++) {
        A.get()[i] = dist(mt);
        B.get()[i] = dist(mt);
        C_cut.get()[i] = 0.0f;
        C_half.get()[i] = 0.0f;
        C_base.get()[i] = 0.0f;
    }

    hipLaunchKernelGGL(m16n16k16_cut, dim3(1), dim3(warp_size), 0, 0, C_cut.get(), A.get(), B.get());
    hipLaunchKernelGGL(m16n16k16_half, dim3(1), dim3(warp_size), 0, 0, C_half.get(), A.get(), B.get());
    hipLaunchKernelGGL(m16n16k16_base, dim3(1), dim3(warp_size), 0, 0, C_base.get(), A.get(), B.get());
    hipDeviceSynchronize();

    // Each label reports the deviation of its own result from the fp32 baseline.
    std::printf("[cut ] max_error = %e\n", get_max_error(C_base.get(), C_cut.get(), N, N));
    std::printf("[half] max_error = %e\n", get_max_error(C_base.get(), C_half.get(), N, N));
}
8b2d7957b56ddca39aba1c3e284223005b3d44fc.cu
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <random>
#include <cutf/memory.hpp>
#include <cutf/experimental/mantissa.hpp>
#include <cutf/type.hpp>
#include <cutf/debug/matrix.hpp>

constexpr unsigned warp_size = 32;

// Three variants of a 16x16x16 matrix multiply-accumulate used to compare rounding error:
// mantissa cut to 10 bits, round trip through half, and a full-fp32 baseline.
__global__ void m16n16k16_cut(float* const c_ptr, const float* const a_ptr, const float* const b_ptr) {
    constexpr unsigned N = 16;
    const unsigned lane_id = threadIdx.x & 0x1f;
    const auto m = lane_id & 0xf;
    const auto n_offset = lane_id / N;
    for (unsigned i = 0; i < N; i += warp_size / N) {
        const auto n = i + n_offset;
        float sum = 0.0f;
        for (unsigned k = 0; k < N; k++) {
            sum += cutf::experimental::mantissa::cut_mantissa<10, cutf::rounding::rr>(a_ptr[m + k * N])
                 * cutf::experimental::mantissa::cut_mantissa<10, cutf::rounding::rr>(b_ptr[k + n * N]);
        }
        c_ptr[m + n * N] += sum;
    }
}

__global__ void m16n16k16_half(float* const c_ptr, const float* const a_ptr, const float* const b_ptr) {
    constexpr unsigned N = 16;
    const unsigned lane_id = threadIdx.x & 0x1f;
    const auto m = lane_id & 0xf;
    const auto n_offset = lane_id / N;
    for (unsigned i = 0; i < N; i += warp_size / N) {
        const auto n = i + n_offset;
        float sum = 0.0f;
        for (unsigned k = 0; k < N; k++) {
            sum += cutf::type::cast<float>(cutf::type::cast<half>(a_ptr[m + k * N]))
                 * cutf::type::cast<float>(cutf::type::cast<half>(b_ptr[k + n * N]));
        }
        c_ptr[m + n * N] += sum;
    }
}

__global__ void m16n16k16_base(float* const c_ptr, const float* const a_ptr, const float* const b_ptr) {
    constexpr unsigned N = 16;
    const unsigned lane_id = threadIdx.x & 0x1f;
    const auto m = lane_id & 0xf;
    const auto n_offset = lane_id / N;
    for (unsigned i = 0; i < N; i += warp_size / N) {
        const auto n = i + n_offset;
        float sum = 0.0f;
        for (unsigned k = 0; k < N; k++) {
            sum += a_ptr[m + k * N] * b_ptr[k + n * N];
        }
        c_ptr[m + n * N] += sum;
    }
}

double get_max_error(const float* const base_ptr, const float* const cut_ptr, const unsigned m, const unsigned n) {
    double max_error = 0.0;
    for (unsigned i = 0; i < m; i++) {
        for (unsigned j = 0; j < n; j++) {
            max_error = std::max(std::abs(static_cast<double>(base_ptr[i * n + j]) - cut_ptr[i * n + j]), max_error);
        }
    }
    return max_error;
}

int main() {
    constexpr unsigned N = 16;
    auto A = cutf::memory::get_host_unique_ptr<float>(N * N);
    auto B = cutf::memory::get_host_unique_ptr<float>(N * N);
    auto C_cut = cutf::memory::get_host_unique_ptr<float>(N * N);
    auto C_half = cutf::memory::get_host_unique_ptr<float>(N * N);
    auto C_base = cutf::memory::get_host_unique_ptr<float>(N * N);

    std::mt19937 mt(std::random_device{}());
    float max_range = 1.0f;
    std::uniform_real_distribution<float> dist(-max_range, max_range);
    for (unsigned i = 0; i < N * N; i++) {
        A.get()[i] = dist(mt);
        B.get()[i] = dist(mt);
        C_cut.get()[i] = 0.0f;
        C_half.get()[i] = 0.0f;
        C_base.get()[i] = 0.0f;
    }

    m16n16k16_cut<<<1, warp_size>>>(C_cut.get(), A.get(), B.get());
    m16n16k16_half<<<1, warp_size>>>(C_half.get(), A.get(), B.get());
    m16n16k16_base<<<1, warp_size>>>(C_base.get(), A.get(), B.get());
    cudaDeviceSynchronize();

    // Each label reports the deviation of its own result from the fp32 baseline.
    std::printf("[cut ] max_error = %e\n", get_max_error(C_base.get(), C_cut.get(), N, N));
    std::printf("[half] max_error = %e\n", get_max_error(C_base.get(), C_half.get(), N, N));
}
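/* Sketch (not part of the file above; the helper name is illustrative): what
   "cutting the mantissa to 10 bits" means at the bit level. binary32 carries 23
   explicit fraction bits and binary16 carries 10, so zeroing the low 13 fraction
   bits emulates half's mantissa width while keeping float's exponent range.
   This is plain truncation (round toward zero); cut_mantissa<10, cutf::rounding::rr>
   applies cutf's own rounding rule, so results are close but not necessarily
   bit-identical. */
__device__ inline float cut_mantissa_10_rz(float v) {
    // keep sign, exponent and the top 10 fraction bits; zero the low 13 bits
    return __uint_as_float(__float_as_uint(v) & 0xffffe000u);
}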
89be85a21c3f3a93ec0fdb671bceae0f17757a76.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"

void checkCUDAErrorFn(const char *msg, const char *file, int line) {
    hipError_t err = hipGetLastError();
    if (hipSuccess == err) {
        return;
    }

    fprintf(stderr, "CUDA error");
    if (file) {
        fprintf(stderr, " (%s:%d)", file, line);
    }
    fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
    exit(EXIT_FAILURE);
}

namespace StreamCompaction {
    namespace Common {

        /**
         * Maps an array to an array of 0s and 1s for stream compaction. Elements
         * which map to 0 will be removed, and elements which map to 1 will be kept.
         */
        __global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
            // global thread index, so the kernel covers all n elements across blocks
            int tx = blockIdx.x * blockDim.x + threadIdx.x;
            if (tx >= n) {
                return;
            }
            if (idata[tx] > 0) {
                bools[tx] = 1;
            } else {
                bools[tx] = 0;
            }
        }

        /**
         * Performs scatter on an array. That is, for each element in idata,
         * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
         */
        __global__ void kernScatter(int n, int *odata, const int *idata,
                const int *bools, const int *indices) {
            int tx = blockIdx.x * blockDim.x + threadIdx.x;
            if (tx >= n) {
                return;
            }
            if (bools[tx] == 1) {
                odata[indices[tx]] = idata[tx];
            }
        }

    }
}
89be85a21c3f3a93ec0fdb671bceae0f17757a76.cu
#include "common.h" void checkCUDAErrorFn(const char *msg, const char *file, int line) { cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { int tx = threadIdx.x; if (tx >= n) { return; } if (idata[tx] > 0) { bools[tx] = 1; } else { bools[tx] = 0; } } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { int tx = threadIdx.x; if (tx >= n) { return; } if (bools[tx] == 1) { odata[indices[tx]] = idata[tx]; } } } }
b577921a096ca788699efe15d9af1d739572169f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "residualMaker.cuh" #include "stdio.h" #define KERNELS_COUNT 30 __global__ void make_res_1st(float * src, float * dst, int src_width, int src_height, int kernel_index, int tile_width, int tile_height) { int tile_top_left = (blockIdx.y * tile_height * blockDim.y + threadIdx.y * tile_height) * (src_width/*gridDim.x * blockDim.x * tile_width*/)+ blockIdx.x * (blockDim.x * tile_width) + threadIdx.x * tile_width; int row, col; row = tile_top_left / src_width + tile_height + 1; col = tile_top_left % src_width + tile_width + 1; /*if (blockIdx.x > blockDim.x - 2 || blockIdx.y > blockDim.y - 2 || !blockIdx.x || !blockIdx.y) { if ((!row) || (!col) || (col >= src_width - 1) || row >= (src_height - 1)) { return; } }*/ float ptr_data[11][11]; float ptr_results[11][11]; //return; int offset = 0; //int end0 = row >= (src_height - 1) ? src_height % 8 : tile_height + 1; //int end1 = (col >= src_width - 1) ? src_width % 8 : tile_width + 1; int end0 = row >= (src_height - 1) ? src_height - tile_top_left / src_width : tile_height + 1; int end1 = (col >= src_width - 1) ? src_width - tile_top_left % src_width : tile_width + 1; int tile_top_left0 = tile_top_left; //tile_top_left0 += + 1; switch (kernel_index) { case 1://left case 8://leftdown src += src_width; break; case 2://right case 7://rightdown case 4://down src += src_width + 1; break; case 3://up case 6://RU src += 1; break; } for (int i = 0; i < end0; i++) { //offset = i * tile_width; for (int j = 0; j < end1; j++) { ptr_data[i][j] = src[tile_top_left0 + j]; } tile_top_left0 += src_width; } //return; tile_top_left0 = tile_top_left; switch (kernel_index) { case 1://left /*for (int i = 0; i < tile_height ; i++) { for (int j = 0; j < tile_width; j++) { ptr_results[i][j] = ptr_data[i][j]; } }*/ for (int i = 0; i < tile_height; i++) { for (int j = 1; j < tile_width + 1; j++) { ptr_results[i][j - 1] = ptr_data[i][j - 1] - ptr_data[i][j]; } } break; case 2://right for (int i = 0; i < tile_height; i++) { //offset = i * tile_width; for (int j = 0; j < tile_width; j++) { ptr_results[i][j] = ptr_data[i][j + 1] - ptr_data[i][j]; } tile_top_left0 += src_width; } break; case 3://up for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 0; j < tile_width; j++) { ptr_results[i - 1][j] = ptr_data[i - 1][j] - ptr_data[i][j]; } tile_top_left0 += src_width; } break; case 4://down for (int i = 0; i < tile_height; i++) { //offset = i * tile_width; for (int j = 0; j < tile_width; j++) { ptr_results[i][j] = ptr_data[i + 1][j] - ptr_data[i][j]; } tile_top_left0 += src_width; } break; case 5://leftup for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 1; j < tile_width + 1; j++) { ptr_results[i - 1][j - 1] = ptr_data[i - 1][j - 1] - ptr_data[i][j]; } tile_top_left0 += src_width; } break; case 6://rightup for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 0; j < tile_width; j++) { ptr_results[i - 1][j] = ptr_data[i - 1][j + 1] - ptr_data[i][j]; } tile_top_left0 += src_width; } break; case 7://rightdown for (int i = 0; i < tile_height; i++) { //offset = i * tile_width; for (int j = 0; j < tile_width; j++) { ptr_results[i][j] = ptr_data[i + 1][j + 1] - ptr_data[i][j]; } tile_top_left0 += src_width; } break; case 8://leftdown for (int i = 0; i < tile_height; i++) { //offset = i * tile_width; for (int j = 1; j < tile_width + 1; j++) { ptr_results[i][j - 1] = 
ptr_data[i + 1][j - 1] - ptr_data[i][j]; } tile_top_left0 += src_width; } break; } tile_top_left0 = tile_top_left; for (int i = 0; i < tile_height; i++) { for (int j = 0; j < tile_width; j++) { dst[tile_top_left0 + j] = ptr_results[i][j]; } tile_top_left0 += src_width; } } __global__ void make_res_2x2(float * src, float * dst, int src_width, int src_height, int kernel_index, int tile_width, int tile_height) { int tile_top_left = (blockIdx.y * tile_height * blockDim.y + threadIdx.y * tile_height) * (src_width/*gridDim.x * blockDim.x * tile_width*/)+ blockIdx.x * (blockDim.x * tile_width) + threadIdx.x * tile_width; float ptr_data[15][15]; float ptr_results[15][15]; int row, col; //int end0 = row >= (src_height - 1) ? src_height % 8 : tile_height + 2; //int end1 = (col >= src_width - 1) ? src_width % 8 : tile_width + 2; row = tile_top_left / src_width + tile_height + 2; col = tile_top_left % src_width + tile_width + 2; //printf("%d", ) int end0 = (row >= src_height - 1) ? src_height - tile_top_left / src_width - 1: tile_height + 2; int end1 = (col >= src_width - 1) ? src_width - tile_top_left % src_width - 1: tile_width + 2; int tile_top_left0 = tile_top_left; switch (kernel_index) { case 4://h src += src_width; break; case 3://v src += 1; break; } for (int i = 0; i < end0; i++) { //offset = i * tile_width; for (int j = 0; j < end1; j++) { ptr_data[i][j] = src[tile_top_left0 + j]; } tile_top_left0 += src_width; } //return; tile_top_left0 = tile_top_left; switch (kernel_index) { case 1://Dh for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 1; j < tile_width + 1; j++) { ptr_results[i - 1][j - 1] = ptr_data[i - 1][j - 1] + ptr_data[i - 1][j] - ptr_data[i][j - 1] - ptr_data[i][j]; } } break; case 2://Dv for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 1; j < tile_width + 1; j++) { ptr_results[i - 1][j - 1] = ptr_data[i - 1][j - 1] - ptr_data[i - 1][j] + ptr_data[i][j - 1] - ptr_data[i][j]; } } break; case 3://Dd for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 1; j < tile_width + 1; j++) { ptr_results[i - 1][j - 1] = -ptr_data[i - 1][j - 1] + ptr_data[i - 1][j] + ptr_data[i][j - 1] - ptr_data[i][j]; } } break; } for (int i = 0; i < tile_height; i++) { for (int j = 0; j < tile_width; j++) { dst[tile_top_left0 + j] = ptr_results[i][j]; } tile_top_left0 += src_width; } } __global__ void make_res_3st(float * src, float * dst, int src_width, int src_height, int kernel_index, int tile_width, int tile_height) { int src_size = src_height * src_width; int tile_size = tile_width * tile_height; int tile_top_left = (blockIdx.y * tile_height * blockDim.y + threadIdx.y * tile_height) * (src_width/*gridDim.x * blockDim.x * tile_width*/)+ blockIdx.x * (blockDim.x * tile_width) + threadIdx.x * tile_width; float ptr_data[15][15]; float ptr_results[15][15]; //return; int offset = 0; /*if (blockIdx.x > blockDim.x - 2 || blockIdx.y > blockDim.y - 2 || !blockIdx.x || !blockIdx.y) { if ((tile_top_left % src_width < 2) || (tile_top_left % src_width == src_width - 2) || tile_top_left < src_width * 2 || tile_top_left >(src_size - 2*src_width)) return; }*/ int row, col; row = tile_top_left / src_width + tile_height + 3; col = tile_top_left % src_width + tile_width + 3; int end0 = row >= (src_height - 1) ? src_height - tile_top_left / src_width - 1: tile_height + 3; int end1 = (col >= src_width - 1) ? 
src_width - tile_top_left % src_width - 1 : tile_width + 3; int tile_top_left0 = tile_top_left; //tile_top_left0 += src_width + 1; //tile_top_left0 += src_width + 1; //return; switch (kernel_index) { case 1: src += src_width * 2; break; case 2: src += src_width * 2 + 1; break; case 7://rightdown src += src_width + 1; break; case 3://up src += 2; break; case 8://leftdown src += src_width; break; case 4://down src += src_width + 2; break; //case 2://right case 6://RU src += 1; break; } for (int i = 0; i < end0; i++) { //offset = i * tile_width; for (int j = 0; j < end1; j++) { ptr_data[i][j] = src[tile_top_left0 + j]; } tile_top_left0 += src_width; } //return; tile_top_left0 = tile_top_left; float f1_3 = 1 / 3.0f; switch (kernel_index) { case 1://left for (int i = 0; i < tile_height; i++) { //offset = i * tile_width; for (int j = 2; j < tile_width + 2; j++) { ptr_results[i][j - 2] = -ptr_data[i][j - 2] + 3 * ptr_data[i][j - 1] - 3 * ptr_data[i][j] + ptr_data[i][j + 1]; } tile_top_left0 += src_width; } break; case 2://right for (int i = 0; i < tile_height; i++) { //offset = i * tile_width; for (int j = 1; j < tile_width + 1; j++) { ptr_results[i][j - 1] = -ptr_data[i][j + 2] + 3 * ptr_data[i][j + 1] - 3 * ptr_data[i][j] + ptr_data[i][j - 1]; } tile_top_left0 += src_width; } break; case 3://up for (int i = 2; i < tile_height + 2; i++) { //offset = i * tile_width; for (int j = 0; j < tile_width; j++) { ptr_results[i - 2][j] = -ptr_data[i - 2][j] + 3 * ptr_data[i - 1][j] - 3 * ptr_data[i][j] + ptr_data[i + 1][j]; } tile_top_left0 += src_width; } break; case 4://down for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 0; j < tile_width; j++) { ptr_results[i - 1][j] = -ptr_data[i + 2][j] + 3 * ptr_data[i + 1][j] - 3 * ptr_data[i][j] + ptr_data[i - 1][j]; } tile_top_left0 += src_width; } break; case 5://leftup for (int i = 2; i < tile_height + 2; i++) { //offset = i * tile_width; for (int j = 2; j < tile_width + 2; j++) { ptr_results[i - 2][j - 2] = -ptr_data[i - 2][j - 2] + 3 * ptr_data[i - 1][j - 1] - 3 * ptr_data[i][j] + ptr_data[i + 1][j + 1]; } tile_top_left0 += src_width; } break; case 6://rightup for (int i = 2; i < tile_height + 2; i++) { //offset = i * tile_width; for (int j = 1; j < tile_width + 1; j++) { ptr_results[i - 2][j - 1] = -ptr_data[i - 2][j + 2] + 3 * ptr_data[i - 1][j + 1] - 3 * ptr_data[i][j] + ptr_data[i + 1][j - 1]; } tile_top_left0 += src_width; } break; case 7://rightdown for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 1; j < tile_width + 1; j++) { ptr_results[i - 1][j - 1] = -ptr_data[i + 2][j + 2] + 3 * ptr_data[i + 1][j + 1] - 3 * ptr_data[i][j] + ptr_data[i - 1][j - 1]; } tile_top_left0 += src_width; } break; case 8://leftdown for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 2; j < tile_width + 2; j++) { ptr_results[i - 1][j - 2] = -ptr_data[i + 2][j - 2] + 3 * ptr_data[i + 1][j - 1] - 3 * ptr_data[i][j] + ptr_data[i - 1][j + 1]; } tile_top_left0 += src_width; } break; } tile_top_left0 = tile_top_left; for (int i = 0; i < tile_height; i++) { for (int j = 0; j < tile_width; j++) { dst[tile_top_left0 + j] = ptr_results[i][j]; } tile_top_left0 += src_width; } }
b577921a096ca788699efe15d9af1d739572169f.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "residualMaker.cuh" #include "stdio.h" #define KERNELS_COUNT 30 __global__ void make_res_1st(float * src, float * dst, int src_width, int src_height, int kernel_index, int tile_width, int tile_height) { int tile_top_left = (blockIdx.y * tile_height * blockDim.y + threadIdx.y * tile_height) * (src_width/*gridDim.x * blockDim.x * tile_width*/)+ blockIdx.x * (blockDim.x * tile_width) + threadIdx.x * tile_width; int row, col; row = tile_top_left / src_width + tile_height + 1; col = tile_top_left % src_width + tile_width + 1; /*if (blockIdx.x > blockDim.x - 2 || blockIdx.y > blockDim.y - 2 || !blockIdx.x || !blockIdx.y) { if ((!row) || (!col) || (col >= src_width - 1) || row >= (src_height - 1)) { return; } }*/ float ptr_data[11][11]; float ptr_results[11][11]; //return; int offset = 0; //int end0 = row >= (src_height - 1) ? src_height % 8 : tile_height + 1; //int end1 = (col >= src_width - 1) ? src_width % 8 : tile_width + 1; int end0 = row >= (src_height - 1) ? src_height - tile_top_left / src_width : tile_height + 1; int end1 = (col >= src_width - 1) ? src_width - tile_top_left % src_width : tile_width + 1; int tile_top_left0 = tile_top_left; //tile_top_left0 += + 1; switch (kernel_index) { case 1://left case 8://leftdown src += src_width; break; case 2://right case 7://rightdown case 4://down src += src_width + 1; break; case 3://up case 6://RU src += 1; break; } for (int i = 0; i < end0; i++) { //offset = i * tile_width; for (int j = 0; j < end1; j++) { ptr_data[i][j] = src[tile_top_left0 + j]; } tile_top_left0 += src_width; } //return; tile_top_left0 = tile_top_left; switch (kernel_index) { case 1://left /*for (int i = 0; i < tile_height ; i++) { for (int j = 0; j < tile_width; j++) { ptr_results[i][j] = ptr_data[i][j]; } }*/ for (int i = 0; i < tile_height; i++) { for (int j = 1; j < tile_width + 1; j++) { ptr_results[i][j - 1] = ptr_data[i][j - 1] - ptr_data[i][j]; } } break; case 2://right for (int i = 0; i < tile_height; i++) { //offset = i * tile_width; for (int j = 0; j < tile_width; j++) { ptr_results[i][j] = ptr_data[i][j + 1] - ptr_data[i][j]; } tile_top_left0 += src_width; } break; case 3://up for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 0; j < tile_width; j++) { ptr_results[i - 1][j] = ptr_data[i - 1][j] - ptr_data[i][j]; } tile_top_left0 += src_width; } break; case 4://down for (int i = 0; i < tile_height; i++) { //offset = i * tile_width; for (int j = 0; j < tile_width; j++) { ptr_results[i][j] = ptr_data[i + 1][j] - ptr_data[i][j]; } tile_top_left0 += src_width; } break; case 5://leftup for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 1; j < tile_width + 1; j++) { ptr_results[i - 1][j - 1] = ptr_data[i - 1][j - 1] - ptr_data[i][j]; } tile_top_left0 += src_width; } break; case 6://rightup for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 0; j < tile_width; j++) { ptr_results[i - 1][j] = ptr_data[i - 1][j + 1] - ptr_data[i][j]; } tile_top_left0 += src_width; } break; case 7://rightdown for (int i = 0; i < tile_height; i++) { //offset = i * tile_width; for (int j = 0; j < tile_width; j++) { ptr_results[i][j] = ptr_data[i + 1][j + 1] - ptr_data[i][j]; } tile_top_left0 += src_width; } break; case 8://leftdown for (int i = 0; i < tile_height; i++) { //offset = i * tile_width; for (int j = 1; j < tile_width + 1; j++) { ptr_results[i][j - 1] = ptr_data[i + 1][j - 1] - ptr_data[i][j]; } tile_top_left0 += 
src_width; } break; } tile_top_left0 = tile_top_left; for (int i = 0; i < tile_height; i++) { for (int j = 0; j < tile_width; j++) { dst[tile_top_left0 + j] = ptr_results[i][j]; } tile_top_left0 += src_width; } } __global__ void make_res_2x2(float * src, float * dst, int src_width, int src_height, int kernel_index, int tile_width, int tile_height) { int tile_top_left = (blockIdx.y * tile_height * blockDim.y + threadIdx.y * tile_height) * (src_width/*gridDim.x * blockDim.x * tile_width*/)+ blockIdx.x * (blockDim.x * tile_width) + threadIdx.x * tile_width; float ptr_data[15][15]; float ptr_results[15][15]; int row, col; //int end0 = row >= (src_height - 1) ? src_height % 8 : tile_height + 2; //int end1 = (col >= src_width - 1) ? src_width % 8 : tile_width + 2; row = tile_top_left / src_width + tile_height + 2; col = tile_top_left % src_width + tile_width + 2; //printf("%d", ) int end0 = (row >= src_height - 1) ? src_height - tile_top_left / src_width - 1: tile_height + 2; int end1 = (col >= src_width - 1) ? src_width - tile_top_left % src_width - 1: tile_width + 2; int tile_top_left0 = tile_top_left; switch (kernel_index) { case 4://h src += src_width; break; case 3://v src += 1; break; } for (int i = 0; i < end0; i++) { //offset = i * tile_width; for (int j = 0; j < end1; j++) { ptr_data[i][j] = src[tile_top_left0 + j]; } tile_top_left0 += src_width; } //return; tile_top_left0 = tile_top_left; switch (kernel_index) { case 1://Dh for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 1; j < tile_width + 1; j++) { ptr_results[i - 1][j - 1] = ptr_data[i - 1][j - 1] + ptr_data[i - 1][j] - ptr_data[i][j - 1] - ptr_data[i][j]; } } break; case 2://Dv for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 1; j < tile_width + 1; j++) { ptr_results[i - 1][j - 1] = ptr_data[i - 1][j - 1] - ptr_data[i - 1][j] + ptr_data[i][j - 1] - ptr_data[i][j]; } } break; case 3://Dd for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 1; j < tile_width + 1; j++) { ptr_results[i - 1][j - 1] = -ptr_data[i - 1][j - 1] + ptr_data[i - 1][j] + ptr_data[i][j - 1] - ptr_data[i][j]; } } break; } for (int i = 0; i < tile_height; i++) { for (int j = 0; j < tile_width; j++) { dst[tile_top_left0 + j] = ptr_results[i][j]; } tile_top_left0 += src_width; } } __global__ void make_res_3st(float * src, float * dst, int src_width, int src_height, int kernel_index, int tile_width, int tile_height) { int src_size = src_height * src_width; int tile_size = tile_width * tile_height; int tile_top_left = (blockIdx.y * tile_height * blockDim.y + threadIdx.y * tile_height) * (src_width/*gridDim.x * blockDim.x * tile_width*/)+ blockIdx.x * (blockDim.x * tile_width) + threadIdx.x * tile_width; float ptr_data[15][15]; float ptr_results[15][15]; //return; int offset = 0; /*if (blockIdx.x > blockDim.x - 2 || blockIdx.y > blockDim.y - 2 || !blockIdx.x || !blockIdx.y) { if ((tile_top_left % src_width < 2) || (tile_top_left % src_width == src_width - 2) || tile_top_left < src_width * 2 || tile_top_left >(src_size - 2*src_width)) return; }*/ int row, col; row = tile_top_left / src_width + tile_height + 3; col = tile_top_left % src_width + tile_width + 3; int end0 = row >= (src_height - 1) ? src_height - tile_top_left / src_width - 1: tile_height + 3; int end1 = (col >= src_width - 1) ? 
src_width - tile_top_left % src_width - 1 : tile_width + 3; int tile_top_left0 = tile_top_left; //tile_top_left0 += src_width + 1; //tile_top_left0 += src_width + 1; //return; switch (kernel_index) { case 1: src += src_width * 2; break; case 2: src += src_width * 2 + 1; break; case 7://rightdown src += src_width + 1; break; case 3://up src += 2; break; case 8://leftdown src += src_width; break; case 4://down src += src_width + 2; break; //case 2://right case 6://RU src += 1; break; } for (int i = 0; i < end0; i++) { //offset = i * tile_width; for (int j = 0; j < end1; j++) { ptr_data[i][j] = src[tile_top_left0 + j]; } tile_top_left0 += src_width; } //return; tile_top_left0 = tile_top_left; float f1_3 = 1 / 3.0f; switch (kernel_index) { case 1://left for (int i = 0; i < tile_height; i++) { //offset = i * tile_width; for (int j = 2; j < tile_width + 2; j++) { ptr_results[i][j - 2] = -ptr_data[i][j - 2] + 3 * ptr_data[i][j - 1] - 3 * ptr_data[i][j] + ptr_data[i][j + 1]; } tile_top_left0 += src_width; } break; case 2://right for (int i = 0; i < tile_height; i++) { //offset = i * tile_width; for (int j = 1; j < tile_width + 1; j++) { ptr_results[i][j - 1] = -ptr_data[i][j + 2] + 3 * ptr_data[i][j + 1] - 3 * ptr_data[i][j] + ptr_data[i][j - 1]; } tile_top_left0 += src_width; } break; case 3://up for (int i = 2; i < tile_height + 2; i++) { //offset = i * tile_width; for (int j = 0; j < tile_width; j++) { ptr_results[i - 2][j] = -ptr_data[i - 2][j] + 3 * ptr_data[i - 1][j] - 3 * ptr_data[i][j] + ptr_data[i + 1][j]; } tile_top_left0 += src_width; } break; case 4://down for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 0; j < tile_width; j++) { ptr_results[i - 1][j] = -ptr_data[i + 2][j] + 3 * ptr_data[i + 1][j] - 3 * ptr_data[i][j] + ptr_data[i - 1][j]; } tile_top_left0 += src_width; } break; case 5://leftup for (int i = 2; i < tile_height + 2; i++) { //offset = i * tile_width; for (int j = 2; j < tile_width + 2; j++) { ptr_results[i - 2][j - 2] = -ptr_data[i - 2][j - 2] + 3 * ptr_data[i - 1][j - 1] - 3 * ptr_data[i][j] + ptr_data[i + 1][j + 1]; } tile_top_left0 += src_width; } break; case 6://rightup for (int i = 2; i < tile_height + 2; i++) { //offset = i * tile_width; for (int j = 1; j < tile_width + 1; j++) { ptr_results[i - 2][j - 1] = -ptr_data[i - 2][j + 2] + 3 * ptr_data[i - 1][j + 1] - 3 * ptr_data[i][j] + ptr_data[i + 1][j - 1]; } tile_top_left0 += src_width; } break; case 7://rightdown for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 1; j < tile_width + 1; j++) { ptr_results[i - 1][j - 1] = -ptr_data[i + 2][j + 2] + 3 * ptr_data[i + 1][j + 1] - 3 * ptr_data[i][j] + ptr_data[i - 1][j - 1]; } tile_top_left0 += src_width; } break; case 8://leftdown for (int i = 1; i < tile_height + 1; i++) { //offset = i * tile_width; for (int j = 2; j < tile_width + 2; j++) { ptr_results[i - 1][j - 2] = -ptr_data[i + 2][j - 2] + 3 * ptr_data[i + 1][j - 1] - 3 * ptr_data[i][j] + ptr_data[i - 1][j + 1]; } tile_top_left0 += src_width; } break; } tile_top_left0 = tile_top_left; for (int i = 0; i < tile_height; i++) { for (int j = 0; j < tile_width; j++) { dst[tile_top_left0 + j] = ptr_results[i][j]; } tile_top_left0 += src_width; } }
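/* Launch sketch (an assumption, not part of the file above; names and sizes are
   illustrative): each thread of make_res_* writes one tile_width x tile_height
   tile of the residual image, so the grid has to cover the image in tiles.
   Tile sizes must respect the fixed local buffers (11x11 in make_res_1st,
   15x15 in make_res_2x2 and make_res_3st), i.e. tile plus the 1-, 2- or 3-pixel
   border must still fit. kernel_index selects the filter variant (cases 1..8 in
   make_res_1st/make_res_3st, 1..3 in make_res_2x2). For simplicity this sketch
   assumes src_width and src_height are multiples of the per-block footprint
   (64 pixels here), since the kernels write full tiles. */
void launch_make_res_1st(float *d_src, float *d_dst,
                         int src_width, int src_height, int kernel_index) {
    const int tile_w = 8, tile_h = 8;        // 8 + 1 border row/col fits the 11x11 buffers
    dim3 block(8, 8);                        // 64 threads -> one 64x64-pixel region per block
    dim3 grid((src_width  + block.x * tile_w - 1) / (block.x * tile_w),
              (src_height + block.y * tile_h - 1) / (block.y * tile_h));
    make_res_1st<<<grid, block>>>(d_src, d_dst, src_width, src_height,
                                  kernel_index, tile_w, tile_h);
}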