Dataset columns (all four are strings; length range shown for each):
  hip_filename    5 to 84
  hip_content     79 to 9.69M
  cuda_filename   4 to 83
  cuda_content    19 to 9.69M
ff9e5dcebc729b1a6b24c3d7fb40408fbf56a87f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "__intToLong.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *A = NULL; hipMalloc(&A, XSIZE*YSIZE); long long *B = NULL; hipMalloc(&B, XSIZE*YSIZE); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( __intToLong), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,N); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( __intToLong), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( __intToLong), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ff9e5dcebc729b1a6b24c3d7fb40408fbf56a87f.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "__intToLong.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); long long *B = NULL; cudaMalloc(&B, XSIZE*YSIZE); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); __intToLong<<<gridBlock,threadBlock>>>(A,B,N); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { __intToLong<<<gridBlock,threadBlock>>>(A,B,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { __intToLong<<<gridBlock,threadBlock>>>(A,B,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
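The pair above is an auto-generated timing harness: the .hip and .cu files differ only in the hipify banner, the runtime API prefixes (hip* vs cuda*, hiprand vs curand), and the kernel-launch spelling. Below is a minimal sketch of that launch-syntax mapping. The kernel body is a hypothetical stand-in, since the real __intToLong.cu include is not part of the sample, and the sketch allocates whole elements rather than the XSIZE*YSIZE bytes the harness requests.

// Hypothetical stand-in for the __intToLong kernel (not shown in the sample),
// plus both launch spellings: the CUDA triple-chevron form and, in a comment,
// the hipLaunchKernelGGL form that hipify emits in the .hip file.
#include <cuda_runtime.h>

__global__ void intToLong(const int *a, long long *b, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) b[i] = static_cast<long long>(a[i]);  // widen each element
}

int main() {
  const int n = 240 * 240;                  // smallest matrix size in the sweep
  int *a = nullptr;
  long long *b = nullptr;
  cudaMalloc(&a, n * sizeof(int));
  cudaMalloc(&b, n * sizeof(long long));
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);   // ceiling division
  intToLong<<<grid, block>>>(a, b, n);
  // HIP equivalent after hipify:
  // hipLaunchKernelGGL((intToLong), grid, block, 0, 0, a, b, n);
  cudaDeviceSynchronize();
  cudaFree(a);
  cudaFree(b);
  return 0;
}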
50f387e52ff783506cedcba49cd55d8da5360fac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ---------------------------------------------------------------------------- // CUDA code to compute minimun distance between n points // #include <stdio.h> #include <stdlib.h> #include <math.h> #include <float.h> #include <time.h> #define MAX_POINTS 1048576 __device__ static float f_atomicMin(float* address, float val) { int* address_as_i = (int*) address; int old = *address_as_i, assumed; do { assumed = old; old = ::atomicCAS(address_as_i, assumed, __float_as_int(::fminf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } // ---------------------------------------------------------------------------- // Kernel Function to compute distance between all pairs of points // Input: // X: X[i] = x-coordinate of the ith point // Y: Y[i] = y-coordinate of the ith point // n: number of points // Output: // D: D[0] = minimum distance // __global__ void minimum_distance(float * X, float * Y, float * D, int n) { int i = threadIdx.x + blockIdx.x * blockDim.x; float dx, dy, Dij, min_distance, min_distance_i; int j; dx = X[1]-X[0]; dy = Y[1]-Y[0]; min_distance = sqrtf(dx*dx+dy*dy); for (j = i+1; j < i+2; j++) { dx = X[j]-X[i]; dy = Y[j]-Y[i]; min_distance_i = sqrtf(dx*dx+dy*dy); } for (j = i+1; j < n; j++) { dx = X[j]-X[i]; dy = Y[j]-Y[i]; Dij = sqrtf(dx*dx+dy*dy); if (min_distance_i > Dij) min_distance_i = Dij; } if (min_distance > min_distance_i) min_distance = min_distance_i; f_atomicMin(D, min_distance); } // ---------------------------------------------------------------------------- // Host function to compute minimum distance between points // Input: // X: X[i] = x-coordinate of the ith point // Y: Y[i] = y-coordinate of the ith point // n: number of points // Output: // D: minimum distance // float minimum_distance_host(float * X, float * Y, int n) { float dx, dy, Dij, min_distance, min_distance_i; int i, j; dx = X[1]-X[0]; dy = Y[1]-Y[0]; min_distance = sqrtf(dx*dx+dy*dy); for (i = 0; i < n-1; i++) { for (j = i+1; j < i+2; j++) { dx = X[j]-X[i]; dy = Y[j]-Y[i]; min_distance_i = sqrtf(dx*dx+dy*dy); } for (j = i+1; j < n; j++) { dx = X[j]-X[i]; dy = Y[j]-Y[i]; Dij = sqrtf(dx*dx+dy*dy); if (min_distance_i > Dij) min_distance_i = Dij; } if (min_distance > min_distance_i) min_distance = min_distance_i; } return min_distance; } // ---------------------------------------------------------------------------- // Print device properties void print_device_properties() { int i, deviceCount; hipDeviceProp_t deviceProp; hipGetDeviceCount(&deviceCount); printf("------------------------------------------------------------\n"); printf("Number of GPU devices found = %d\n", deviceCount); for ( i = 0; i < deviceCount; ++i ) { hipGetDeviceProperties(&deviceProp, i); printf("[Device: %1d] Compute Capability %d.%d.\n", i, deviceProp.major, deviceProp.minor); printf(" ... multiprocessor count = %d\n", deviceProp.multiProcessorCount); printf(" ... max threads per multiprocessor = %d\n", deviceProp.maxThreadsPerMultiProcessor); printf(" ... max threads per block = %d\n", deviceProp.maxThreadsPerBlock); printf(" ... max block dimension = %d, %d, %d (along x, y, z)\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" ... max grid size = %d, %d, %d (along x, y, z)\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" ... warp size = %d\n", deviceProp.warpSize); printf(" ... 
clock rate = %d MHz\n", deviceProp.clockRate/1000); } printf("------------------------------------------------------------\n"); } // ---------------------------------------------------------------------------- // Main program - initializes points and computes minimum distance // between the points // int main(int argc, char* argv[]) { // Host Data float * hVx; // host x-coordinate array float * hVy; // host y-coordinate array float hmin_dist = 100000.0; // minimum value on host // Device Data float * dVx; // device x-coordinate array float * dVy; // device x-coordinate array float * dmin_dist; // minimum value on device // Device parameters int MAX_BLOCK_SIZE; // Maximum number of threads allowed on the device // Timing variables hipEvent_t start, stop; // GPU timing variables struct timespec cpu_start, cpu_stop; // CPU timing variables float time_array[10]; // Other variables int i, size, num_points; float min_distance, sqrtn; int seed = 0; // Print device properties // print_device_properties(); // Get device information and set device to use int deviceCount; hipDeviceProp_t deviceProp; hipGetDeviceCount(&deviceCount); if (deviceCount > 0) { hipSetDevice(0); hipGetDeviceProperties(&deviceProp, 0); MAX_BLOCK_SIZE = deviceProp.maxThreadsPerBlock; } else { printf("Warning: No GPU device found ... results may be incorrect\n"); } // Timing initializations hipEventCreate(&start); hipEventCreate(&stop); // Check input if (argc != 2) { printf("Use: %s <number of points>\n", argv[0]); exit(0); } if ((num_points = atoi(argv[argc-1])) < 2) { printf("Minimum number of points allowed: 2\n"); exit(0); } if ((num_points = atoi(argv[argc-1])) > MAX_POINTS) { printf("Maximum number of points allowed: %d\n", MAX_POINTS); exit(0); } // Allocate host coordinate arrays size = num_points * sizeof(float); hVx = (float *) malloc(size); hVy = (float *) malloc(size); // Initialize points srand48(seed); sqrtn = (float) sqrt(num_points); for (i = 0; i < num_points; i++) { hVx[i] = sqrtn * (float)drand48(); hVy[i] = sqrtn * (float)drand48(); } // Allocate device coordinate arrays hipMalloc(&dVx, size); hipMalloc(&dVy, size); hipMalloc(&dmin_dist, 1 * sizeof(float)); // Copy coordinate arrays from host memory to device memory hipEventRecord( start, 0 ); float init_dist[1] = {1000.0f}; hipMemcpy(dVx, hVx, size, hipMemcpyHostToDevice); hipMemcpy(dVy, hVy, size, hipMemcpyHostToDevice); hipMemcpy(dmin_dist, init_dist, sizeof(float), hipMemcpyHostToDevice); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&(time_array[0]), start, stop); // Invoke kernel hipEventRecord( start, 0 ); // ------------------------------------------------------------ if((num_points) <= MAX_BLOCK_SIZE) { dim3 ablock(num_points, 1, 1); dim3 agrid(1, 1); printf("Can fit all points on one block: agrid=(%d, %d, %d) ablock=(%d, %d, %d)\n", agrid.x, agrid.y, agrid.z, ablock.x, ablock.y, ablock.z); hipLaunchKernelGGL(( minimum_distance), dim3(agrid), dim3(ablock), 0, 0, dVx, dVy, dmin_dist, num_points); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); } else { dim3 ablock(MAX_BLOCK_SIZE, 1, 1); int numBlocks = (num_points) / MAX_BLOCK_SIZE; dim3 agrid(numBlocks, numBlocks); printf("Must use multiple blocks: agrid=(%d, %d, %d) ablock=(%d, %d, %d)\n", agrid.x, agrid.y, agrid.z, ablock.x, ablock.y, ablock.z); hipLaunchKernelGGL(( minimum_distance), dim3(agrid), dim3(ablock), 0, 0, dVx, dVy, dmin_dist, num_points); hipError_t err = hipGetLastError(); if (err != hipSuccess) 
printf("Error: %s\n", hipGetErrorString(err)); } // ------------------------------------------------------------ hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&(time_array[1]), start, stop); // Copy result from device memory to host memory hipEventRecord( start, 0 ); hipMemcpy(&hmin_dist, dmin_dist, sizeof(float), hipMemcpyDeviceToHost); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&(time_array[2]), start, stop); // Compute minimum distance on host to check device computation clock_gettime(CLOCK_REALTIME, &cpu_start); min_distance = minimum_distance_host(hVx, hVy, num_points); clock_gettime(CLOCK_REALTIME, &cpu_stop); time_array[3] = 1000*((cpu_stop.tv_sec-cpu_start.tv_sec) +0.000000001*(cpu_stop.tv_nsec-cpu_start.tv_nsec)); // Print results printf("Number of Points = %d\n", num_points); printf("GPU Host-to-device = %f ms \n", time_array[0]); printf("GPU Device-to-host = %f ms \n", time_array[2]); printf("GPU execution time = %f ms \n", time_array[1]); printf("CPU execution time = %f ms\n", time_array[3]); printf("Min. distance (GPU) = %e\n", hmin_dist); printf("Min. distance (CPU) = %e\n", min_distance); printf("Relative error = %e\n", fabs(min_distance-hmin_dist)/min_distance); // Free device memory hipFree(dVx); hipFree(dVy); hipFree(dmin_dist); // Free host memory free(hVx); free(hVy); }
50f387e52ff783506cedcba49cd55d8da5360fac.cu
// ---------------------------------------------------------------------------- // CUDA code to compute minimun distance between n points // #include <stdio.h> #include <stdlib.h> #include <math.h> #include <float.h> #include <time.h> #define MAX_POINTS 1048576 __device__ static float f_atomicMin(float* address, float val) { int* address_as_i = (int*) address; int old = *address_as_i, assumed; do { assumed = old; old = ::atomicCAS(address_as_i, assumed, __float_as_int(::fminf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } // ---------------------------------------------------------------------------- // Kernel Function to compute distance between all pairs of points // Input: // X: X[i] = x-coordinate of the ith point // Y: Y[i] = y-coordinate of the ith point // n: number of points // Output: // D: D[0] = minimum distance // __global__ void minimum_distance(float * X, float * Y, float * D, int n) { int i = threadIdx.x + blockIdx.x * blockDim.x; float dx, dy, Dij, min_distance, min_distance_i; int j; dx = X[1]-X[0]; dy = Y[1]-Y[0]; min_distance = sqrtf(dx*dx+dy*dy); for (j = i+1; j < i+2; j++) { dx = X[j]-X[i]; dy = Y[j]-Y[i]; min_distance_i = sqrtf(dx*dx+dy*dy); } for (j = i+1; j < n; j++) { dx = X[j]-X[i]; dy = Y[j]-Y[i]; Dij = sqrtf(dx*dx+dy*dy); if (min_distance_i > Dij) min_distance_i = Dij; } if (min_distance > min_distance_i) min_distance = min_distance_i; f_atomicMin(D, min_distance); } // ---------------------------------------------------------------------------- // Host function to compute minimum distance between points // Input: // X: X[i] = x-coordinate of the ith point // Y: Y[i] = y-coordinate of the ith point // n: number of points // Output: // D: minimum distance // float minimum_distance_host(float * X, float * Y, int n) { float dx, dy, Dij, min_distance, min_distance_i; int i, j; dx = X[1]-X[0]; dy = Y[1]-Y[0]; min_distance = sqrtf(dx*dx+dy*dy); for (i = 0; i < n-1; i++) { for (j = i+1; j < i+2; j++) { dx = X[j]-X[i]; dy = Y[j]-Y[i]; min_distance_i = sqrtf(dx*dx+dy*dy); } for (j = i+1; j < n; j++) { dx = X[j]-X[i]; dy = Y[j]-Y[i]; Dij = sqrtf(dx*dx+dy*dy); if (min_distance_i > Dij) min_distance_i = Dij; } if (min_distance > min_distance_i) min_distance = min_distance_i; } return min_distance; } // ---------------------------------------------------------------------------- // Print device properties void print_device_properties() { int i, deviceCount; cudaDeviceProp deviceProp; cudaGetDeviceCount(&deviceCount); printf("------------------------------------------------------------\n"); printf("Number of GPU devices found = %d\n", deviceCount); for ( i = 0; i < deviceCount; ++i ) { cudaGetDeviceProperties(&deviceProp, i); printf("[Device: %1d] Compute Capability %d.%d.\n", i, deviceProp.major, deviceProp.minor); printf(" ... multiprocessor count = %d\n", deviceProp.multiProcessorCount); printf(" ... max threads per multiprocessor = %d\n", deviceProp.maxThreadsPerMultiProcessor); printf(" ... max threads per block = %d\n", deviceProp.maxThreadsPerBlock); printf(" ... max block dimension = %d, %d, %d (along x, y, z)\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" ... max grid size = %d, %d, %d (along x, y, z)\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" ... warp size = %d\n", deviceProp.warpSize); printf(" ... 
clock rate = %d MHz\n", deviceProp.clockRate/1000); } printf("------------------------------------------------------------\n"); } // ---------------------------------------------------------------------------- // Main program - initializes points and computes minimum distance // between the points // int main(int argc, char* argv[]) { // Host Data float * hVx; // host x-coordinate array float * hVy; // host y-coordinate array float hmin_dist = 100000.0; // minimum value on host // Device Data float * dVx; // device x-coordinate array float * dVy; // device x-coordinate array float * dmin_dist; // minimum value on device // Device parameters int MAX_BLOCK_SIZE; // Maximum number of threads allowed on the device // Timing variables cudaEvent_t start, stop; // GPU timing variables struct timespec cpu_start, cpu_stop; // CPU timing variables float time_array[10]; // Other variables int i, size, num_points; float min_distance, sqrtn; int seed = 0; // Print device properties // print_device_properties(); // Get device information and set device to use int deviceCount; cudaDeviceProp deviceProp; cudaGetDeviceCount(&deviceCount); if (deviceCount > 0) { cudaSetDevice(0); cudaGetDeviceProperties(&deviceProp, 0); MAX_BLOCK_SIZE = deviceProp.maxThreadsPerBlock; } else { printf("Warning: No GPU device found ... results may be incorrect\n"); } // Timing initializations cudaEventCreate(&start); cudaEventCreate(&stop); // Check input if (argc != 2) { printf("Use: %s <number of points>\n", argv[0]); exit(0); } if ((num_points = atoi(argv[argc-1])) < 2) { printf("Minimum number of points allowed: 2\n"); exit(0); } if ((num_points = atoi(argv[argc-1])) > MAX_POINTS) { printf("Maximum number of points allowed: %d\n", MAX_POINTS); exit(0); } // Allocate host coordinate arrays size = num_points * sizeof(float); hVx = (float *) malloc(size); hVy = (float *) malloc(size); // Initialize points srand48(seed); sqrtn = (float) sqrt(num_points); for (i = 0; i < num_points; i++) { hVx[i] = sqrtn * (float)drand48(); hVy[i] = sqrtn * (float)drand48(); } // Allocate device coordinate arrays cudaMalloc(&dVx, size); cudaMalloc(&dVy, size); cudaMalloc(&dmin_dist, 1 * sizeof(float)); // Copy coordinate arrays from host memory to device memory cudaEventRecord( start, 0 ); float init_dist[1] = {1000.0f}; cudaMemcpy(dVx, hVx, size, cudaMemcpyHostToDevice); cudaMemcpy(dVy, hVy, size, cudaMemcpyHostToDevice); cudaMemcpy(dmin_dist, init_dist, sizeof(float), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&(time_array[0]), start, stop); // Invoke kernel cudaEventRecord( start, 0 ); // ------------------------------------------------------------ if((num_points) <= MAX_BLOCK_SIZE) { dim3 ablock(num_points, 1, 1); dim3 agrid(1, 1); printf("Can fit all points on one block: agrid=(%d, %d, %d) ablock=(%d, %d, %d)\n", agrid.x, agrid.y, agrid.z, ablock.x, ablock.y, ablock.z); minimum_distance<<<agrid, ablock>>>(dVx, dVy, dmin_dist, num_points); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); } else { dim3 ablock(MAX_BLOCK_SIZE, 1, 1); int numBlocks = (num_points) / MAX_BLOCK_SIZE; dim3 agrid(numBlocks, numBlocks); printf("Must use multiple blocks: agrid=(%d, %d, %d) ablock=(%d, %d, %d)\n", agrid.x, agrid.y, agrid.z, ablock.x, ablock.y, ablock.z); minimum_distance<<<agrid, ablock>>>(dVx, dVy, dmin_dist, num_points); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); } // 
------------------------------------------------------------ cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&(time_array[1]), start, stop); // Copy result from device memory to host memory cudaEventRecord( start, 0 ); cudaMemcpy(&hmin_dist, dmin_dist, sizeof(float), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&(time_array[2]), start, stop); // Compute minimum distance on host to check device computation clock_gettime(CLOCK_REALTIME, &cpu_start); min_distance = minimum_distance_host(hVx, hVy, num_points); clock_gettime(CLOCK_REALTIME, &cpu_stop); time_array[3] = 1000*((cpu_stop.tv_sec-cpu_start.tv_sec) +0.000000001*(cpu_stop.tv_nsec-cpu_start.tv_nsec)); // Print results printf("Number of Points = %d\n", num_points); printf("GPU Host-to-device = %f ms \n", time_array[0]); printf("GPU Device-to-host = %f ms \n", time_array[2]); printf("GPU execution time = %f ms \n", time_array[1]); printf("CPU execution time = %f ms\n", time_array[3]); printf("Min. distance (GPU) = %e\n", hmin_dist); printf("Min. distance (CPU) = %e\n", min_distance); printf("Relative error = %e\n", fabs(min_distance-hmin_dist)/min_distance); // Free device memory cudaFree(dVx); cudaFree(dVy); cudaFree(dmin_dist); // Free host memory free(hVx); free(hVy); }
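Both versions of this row build a floating-point atomic minimum out of atomicCAS, since neither CUDA nor HIP exposes a native atomicMin for float. The following self-contained sketch isolates that idiom; the function and kernel names here are illustrative, not taken from the sample.

// Float atomic-min via an atomicCAS retry loop: reinterpret the float as an
// int for the compare-and-swap, but do the actual comparison with fminf on
// the float values, so the result is correct for any sign.
#include <cuda_runtime.h>

__device__ float atomicMinFloat(float *address, float val) {
  int *addr_as_int = reinterpret_cast<int *>(address);
  int old = *addr_as_int, assumed;
  do {
    assumed = old;
    old = atomicCAS(addr_as_int, assumed,
                    __float_as_int(fminf(val, __int_as_float(assumed))));
  } while (assumed != old);   // another thread won the race; try again
  return __int_as_float(old);
}

__global__ void blockMin(const float *x, float *result, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) atomicMinFloat(result, x[i]);  // result must start at a large value
}

This is why the host code in the sample copies init_dist = {1000.0f} into dmin_dist before launching the kernel: the running minimum has to start above any distance the kernel can produce.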
afe20826957775a8befd6616dc286b8a3363e570.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <roctracer/roctx.h> #include <argparse/argparse.hpp> #include "common.hpp" /* NOTE: A and C are column major, B is row major */ __global__ void mygemm(float *__restrict__ c, //<! [out] and MxN matrix const float *a, //<! [in] an MxK matrix const float *b, //<! [in] an KxN matrix const int M, const int N, const int K) { #define A(_i, _j) a[(_i) + (_j)*M] #define B(_i, _j) b[(_i)*N + (_j)] #define C(_i, _j) c[(_i) + (_j)*M] int gidx = blockDim.x * blockIdx.x + threadIdx.x; int gidy = blockDim.y * blockIdx.y + threadIdx.y; for (int i = gidy; i < M; i += gridDim.y * blockDim.y) { for (int j = gidx; j < N; j += gridDim.x * blockDim.x) { float acc = 0; for (int k = 0; k < K; ++k) { acc += A(i, k) * B(k, j); } C(i, j) = acc; } } #undef A #undef B #undef C } /* Time the total transfer & matrix-multiplication time */ int main(int argc, char **argv) { argparse::Parser parser; // default matrix sizes: // A: 1600 x 1500 // B: 1500 x 1400 // C: 1600 x 1400 int m = 1600; int n = 1400; int k = 1500; int nIters = 5; int nWarmup = 5; parser.add_positional(m); parser.add_positional(n); parser.add_positional(k); parser.add_option(nIters, "--iters"); parser.add_option(nWarmup, "--warmup"); if (!parser.parse(argc, argv)) { parser.help(); exit(EXIT_FAILURE); } const int64_t flop = int64_t(m) * int64_t(n) * int64_t(k) * 2 * nIters; // initialize host data std::cout << "generate data\n"; roctxRangePush("generate data"); float *aHost, *bHost, *cHost; CUDA_RUNTIME(hipHostMalloc(&aHost, m * k * sizeof(float), 0)); CUDA_RUNTIME(hipHostMalloc(&bHost, k * n * sizeof(float), 0)); CUDA_RUNTIME(hipHostMalloc(&cHost, m * n * sizeof(float), 0)); std::generate(aHost, aHost + m * k, random_int); std::generate(bHost, bHost + k * n, random_int); roctxRangePop(); // allocate device data float *aDev, *bDev, *cDev; CUDA_RUNTIME(hipMalloc(&aDev, m * k * sizeof(float))); CUDA_RUNTIME(hipMalloc(&bDev, k * n * sizeof(float))); CUDA_RUNTIME(hipMalloc(&cDev, m * n * sizeof(float))); // create events to time GPU kernel hipEvent_t start, stop; CUDA_RUNTIME(hipEventCreate(&start)); CUDA_RUNTIME(hipEventCreate(&stop)); // GPU kernel launch parameters dim3 dimBlock(32, 32); dim3 dimGrid; dimGrid.x = (n + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (m + dimBlock.y - 1) / dimBlock.y; float kernelTime = 0; float wallTime = 0; for (int iter = 0; iter < nWarmup + nIters; ++iter) { auto wallStart = Clock::now(); // copy data to device roctxRangePush("host-to-device"); CUDA_RUNTIME( hipMemcpy(aDev, aHost, m * k * sizeof(float), hipMemcpyDefault)); CUDA_RUNTIME( hipMemcpy(bDev, bHost, k * n * sizeof(float), hipMemcpyDefault)); roctxRangePop(); // kernel time float millis; CUDA_RUNTIME(hipEventRecord(start)); hipLaunchKernelGGL(( mygemm), dim3(dimGrid), dim3(dimBlock), 0, 0, cDev, aDev, bDev, m, n, k); CUDA_RUNTIME(hipEventRecord(stop)); CUDA_RUNTIME(hipEventSynchronize(stop)); CUDA_RUNTIME(hipEventElapsedTime(&millis, start, stop)); // copy data back to host roctxRangePush("device-to-host"); CUDA_RUNTIME( hipMemcpy(cHost, cDev, m * n * sizeof(float), hipMemcpyDefault)); roctxRangePop(); CUDA_RUNTIME(hipDeviceSynchronize()); Duration wallElapsed = Clock::now() - wallStart; std::cout << iter << " kernel=" << millis / 1000 << " wall=" << wallElapsed.count() << (iter >= nWarmup ? 
" *" : " ") << "\n"; // track time if no longer during warmup if (iter >= nWarmup) { wallTime += wallElapsed.count(); kernelTime += millis / 1000; // seconds } } // print results double kernelGflops = flop / 1e9 / kernelTime; std::cout << "kernel " << kernelGflops << "GFLOPS (" << flop << " flop, " << kernelTime << "s)\n"; double wallGflops = flop / 1e9 / wallTime; std::cout << "wall " << wallGflops << "GFLOPS (" << flop << " flop, " << wallTime << "s)\n"; // release resources CUDA_RUNTIME(hipEventDestroy(start)); CUDA_RUNTIME(hipEventDestroy(stop)); CUDA_RUNTIME(hipFree(aDev)); CUDA_RUNTIME(hipFree(bDev)); CUDA_RUNTIME(hipFree(cDev)); CUDA_RUNTIME(hipHostFree(aHost)); CUDA_RUNTIME(hipHostFree(bHost)); CUDA_RUNTIME(hipHostFree(cHost)); return 0; }
afe20826957775a8befd6616dc286b8a3363e570.cu
#include <algorithm> #include <nvToolsExt.h> #include <argparse/argparse.hpp> #include "common.hpp" /* NOTE: A and C are column major, B is row major */ __global__ void mygemm(float *__restrict__ c, //<! [out] and MxN matrix const float *a, //<! [in] an MxK matrix const float *b, //<! [in] an KxN matrix const int M, const int N, const int K) { #define A(_i, _j) a[(_i) + (_j)*M] #define B(_i, _j) b[(_i)*N + (_j)] #define C(_i, _j) c[(_i) + (_j)*M] int gidx = blockDim.x * blockIdx.x + threadIdx.x; int gidy = blockDim.y * blockIdx.y + threadIdx.y; for (int i = gidy; i < M; i += gridDim.y * blockDim.y) { for (int j = gidx; j < N; j += gridDim.x * blockDim.x) { float acc = 0; for (int k = 0; k < K; ++k) { acc += A(i, k) * B(k, j); } C(i, j) = acc; } } #undef A #undef B #undef C } /* Time the total transfer & matrix-multiplication time */ int main(int argc, char **argv) { argparse::Parser parser; // default matrix sizes: // A: 1600 x 1500 // B: 1500 x 1400 // C: 1600 x 1400 int m = 1600; int n = 1400; int k = 1500; int nIters = 5; int nWarmup = 5; parser.add_positional(m); parser.add_positional(n); parser.add_positional(k); parser.add_option(nIters, "--iters"); parser.add_option(nWarmup, "--warmup"); if (!parser.parse(argc, argv)) { parser.help(); exit(EXIT_FAILURE); } const int64_t flop = int64_t(m) * int64_t(n) * int64_t(k) * 2 * nIters; // initialize host data std::cout << "generate data\n"; nvtxRangePush("generate data"); float *aHost, *bHost, *cHost; CUDA_RUNTIME(cudaHostAlloc(&aHost, m * k * sizeof(float), 0)); CUDA_RUNTIME(cudaHostAlloc(&bHost, k * n * sizeof(float), 0)); CUDA_RUNTIME(cudaHostAlloc(&cHost, m * n * sizeof(float), 0)); std::generate(aHost, aHost + m * k, random_int); std::generate(bHost, bHost + k * n, random_int); nvtxRangePop(); // allocate device data float *aDev, *bDev, *cDev; CUDA_RUNTIME(cudaMalloc(&aDev, m * k * sizeof(float))); CUDA_RUNTIME(cudaMalloc(&bDev, k * n * sizeof(float))); CUDA_RUNTIME(cudaMalloc(&cDev, m * n * sizeof(float))); // create events to time GPU kernel cudaEvent_t start, stop; CUDA_RUNTIME(cudaEventCreate(&start)); CUDA_RUNTIME(cudaEventCreate(&stop)); // GPU kernel launch parameters dim3 dimBlock(32, 32); dim3 dimGrid; dimGrid.x = (n + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (m + dimBlock.y - 1) / dimBlock.y; float kernelTime = 0; float wallTime = 0; for (int iter = 0; iter < nWarmup + nIters; ++iter) { auto wallStart = Clock::now(); // copy data to device nvtxRangePush("host-to-device"); CUDA_RUNTIME( cudaMemcpy(aDev, aHost, m * k * sizeof(float), cudaMemcpyDefault)); CUDA_RUNTIME( cudaMemcpy(bDev, bHost, k * n * sizeof(float), cudaMemcpyDefault)); nvtxRangePop(); // kernel time float millis; CUDA_RUNTIME(cudaEventRecord(start)); mygemm<<<dimGrid, dimBlock>>>(cDev, aDev, bDev, m, n, k); CUDA_RUNTIME(cudaEventRecord(stop)); CUDA_RUNTIME(cudaEventSynchronize(stop)); CUDA_RUNTIME(cudaEventElapsedTime(&millis, start, stop)); // copy data back to host nvtxRangePush("device-to-host"); CUDA_RUNTIME( cudaMemcpy(cHost, cDev, m * n * sizeof(float), cudaMemcpyDefault)); nvtxRangePop(); CUDA_RUNTIME(cudaDeviceSynchronize()); Duration wallElapsed = Clock::now() - wallStart; std::cout << iter << " kernel=" << millis / 1000 << " wall=" << wallElapsed.count() << (iter >= nWarmup ? 
" *" : " ") << "\n"; // track time if no longer during warmup if (iter >= nWarmup) { wallTime += wallElapsed.count(); kernelTime += millis / 1000; // seconds } } // print results double kernelGflops = flop / 1e9 / kernelTime; std::cout << "kernel " << kernelGflops << "GFLOPS (" << flop << " flop, " << kernelTime << "s)\n"; double wallGflops = flop / 1e9 / wallTime; std::cout << "wall " << wallGflops << "GFLOPS (" << flop << " flop, " << wallTime << "s)\n"; // release resources CUDA_RUNTIME(cudaEventDestroy(start)); CUDA_RUNTIME(cudaEventDestroy(stop)); CUDA_RUNTIME(cudaFree(aDev)); CUDA_RUNTIME(cudaFree(bDev)); CUDA_RUNTIME(cudaFree(cDev)); CUDA_RUNTIME(cudaFreeHost(aHost)); CUDA_RUNTIME(cudaFreeHost(bHost)); CUDA_RUNTIME(cudaFreeHost(cHost)); return 0; }
dc7662f6f3770c4222241ee06104924d2d45360b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "nbody_helper2.h" // time stamp function in seconds double getTimeStamp() { struct timeval tv; gettimeofday (&tv, NULL); return (double) tv.tv_usec/1000000 + tv.tv_sec; } // HELPER FUNCTIONS inline float3 scalevec (float3 &v0, float scalar) { float3 rt = v0; rt.x *= scalar; rt.y *= scalar; return rt; } inline float dot (float2 v0, float2 v1) { return v0.x*v1.x + v0.y*v1.y; } inline float normalize (float2 v0) { float dist = sqrtf(dot(v0, v0)); if (dist > 1e-6) { v0.x /= dist; v0.y /= dist; } else { v0.x *= 1e6; v0.y *= 1e6; } return dist; } inline float3 cross (float3 v0, float3 v1) { float3 v2; v2.x = v0.y*v1.z - v0.z*v1.y; v2.y = v0.z*v1.x - v0.x*v1.z; v2.z = v0.z*v1.y - v0.y*v1.x; return v2; } inline float rand_sign () { return (rand()-RAND_MAX) >= 0 ? 1.0 : -1.0; } // void print_BodyStats (const float3 *r, const float3 *v, const float3 *a, const unsigned long nElem) // { // printf("\n"); // unsigned long idx; // // print body number // for (idx=0; idx<nElem; idx++) { // if (idx == nElem-1) // printf("Mass %ld\n", idx); // else // printf("Mass %ld\t", idx); // } // // print Mass // for (idx=0; idx<nElem; idx++) { // if (idx == nElem-1) // printf("%.2f\n", r[idx].z); // else // printf("%.2f\t", m[idx]); // } // // print position // for (dim=0; dim<ND; dim++) { // for (unsigned long idx=0; idx<nElem; idx++) { // if (idx == nElem-1) // printf("%.2f\n", *(r+idx*(ND+1)+dim)); // else // printf("%.2f\t", *(r+idx*(ND+1)+dim)); // } // } // // print velocity // for (dim=0; dim<ND; dim++) { // for (unsigned long idx=0; idx<nElem; idx++) { // if (idx == nElem-1) // printf("%.2f\n", v[ND*idx + dim]); // else // printf("%.2f\t", v[ND*idx + dim]); // } // } // // print acceleration // for (dim=0; dim<ND; dim++) { // for (unsigned long idx=0; idx<nElem; idx++) { // if (idx == nElem-1) // printf("%.2f\n", a[ND*idx + dim]); // else // printf("%.2f\t", a[ND*idx + dim]); // } // } // } void init_MassPositionVelocity (float3 *r, float3 *v, const unsigned long nElem, const unsigned int config) { // generate different seed for pseudo-random number generator // time_t t; // srand ((unsigned int) time(&t)); srand ((unsigned int) 1000); // populating mass, position, & velocity arrays unsigned long idx; float mass_range = MAX_MASS - MIN_MASS; float x_width = 300.0; float y_width = 300.0; float x_mid = X_RES/2; //float x_max = (X_RES + x_width)/2; float x_min = (X_RES - x_width)/2; float y_mid = Y_RES/2; //float y_max = (Y_RES + y_width)/2; float y_min = (Y_RES - y_width)/2; float x, y, radius, angle, system_mass, speed_factor, tangential_speed; float shell_radius, shell_thickness, radial_velocity; float2 CoM, dist, unit_dist; switch (config) { case RANDOM_SQUARE_NO_VEL: printf("Initializing positions and mass\n"); for (idx=0; idx<nElem; idx++) { r[idx].x = (float) ((double) rand()/RAND_MAX) * x_width + x_min; r[idx].y = (float) ((double) rand()/RAND_MAX) * y_width + y_min; r[idx].z = (float) ((double) rand()/RAND_MAX) * mass_range + MIN_MASS; v[idx] = (float3) {0.0f, 0.0f, 0.0f}; // printf("Body %ld\t x: %.6f\ty: %.6f\t m: %.6f\n", // idx, r[idx].x, r[idx].y, r[idx].z); } break; case RANDOM_CIRCLE_NO_VEL: for (idx=0; idx<nElem; idx++) { radius = (float) (rand()/RAND_MAX) * y_width/2; x = (float) (rand()/RAND_MAX) * radius * rand_sign(); y = sqrt(radius*radius - x*x) * rand_sign(); r[idx].x = x_mid + x; r[idx].y = y_mid + y;; r[idx].z = (float) (rand()/RAND_MAX) * mass_range + MIN_MASS; v[idx] = (float3) {0.0f, 0.0f, 
0.0f}; } break; case EXPAND_SHELL: shell_radius = y_width/2; shell_thickness = 0.25*shell_radius; CoM = (float2) {0.0f, 0.0f}; system_mass = 0.0; speed_factor=0.1; for (idx=0; idx<nElem; idx++) { // radius is the distance of point from center of window radius = (float) (rand()/RAND_MAX)*shell_thickness - shell_thickness/2 + shell_radius; x = (float) (rand()/RAND_MAX) * radius * rand_sign(); y = sqrt(radius*radius - x*x) * rand_sign(); r[idx].x = x_mid + x; r[idx].y = y_mid + y;; r[idx].z = (float) (rand()/RAND_MAX) * mass_range + MIN_MASS; CoM.x += r[idx].z * r[idx].x; CoM.y += r[idx].z * r[idx].y; system_mass += r[idx].z; } CoM.x /= system_mass; CoM.y /= system_mass; for (idx=0; idx<nElem; idx++) { // radius is now the distance of point from Center of Mass (CoM) dist.x = r[idx].x - CoM.x; dist.y = r[idx].y - CoM.y; angle = (float) atan(dist.y/dist.x); radius = sqrtf(dist.x*dist.x + dist.y*dist.y); radial_velocity = speed_factor * sqrtf(2*G*system_mass/radius); v[idx].x = radial_velocity * (float) cos(angle); v[idx].y = radial_velocity * (float) sin(angle); v[idx].z = 0.0f; } break; case SPIRAL_SINGLE_GALAXY: CoM = (float2) {0.0f, 0.0f}; system_mass = 0.0; for (idx=0; idx<nElem; idx++) { if (idx == 0) { r[idx].x = x_mid; r[idx].y = y_mid; r[idx].z = ((float) (rand()/RAND_MAX) * mass_range + MIN_MASS)*10000; } else { r[idx].x = (float) (rand()/RAND_MAX) * x_width + x_min; r[idx].y = (float) (rand()/RAND_MAX) * y_width + y_min; r[idx].z = (float) (rand()/RAND_MAX) * mass_range + MIN_MASS; } CoM.x += r[idx].z * r[idx].x; CoM.y += r[idx].z * r[idx].y; system_mass += r[idx].z; } CoM.x /= system_mass; CoM.y /= system_mass; for (idx=0; idx<nElem; idx++) { // radius is now the distance of point from Center of Mass (CoM) dist.x = r[idx].x - CoM.x; dist.y = r[idx].y - CoM.y; radius = sqrtf(dist.x*dist.x + dist.y*dist.y); unit_dist.x = dist.x / radius; unit_dist.y = dist.y / radius; tangential_speed = sqrtf(G*system_mass/radius) * 1.1; v[idx].x = unit_dist.y * tangential_speed; v[idx].y = -1*unit_dist.x * tangential_speed; v[idx].z = 0.0f; } break; case SPIRAL_DOUBLE_GALAXY: CoM = (float2) {0.0f, 0.0f}; system_mass = 0.0; for (idx=0; idx<nElem; idx++) { if (idx == 0) { r[idx].x = x_mid; r[idx].y = y_mid; r[idx].z = ((float) (rand()/RAND_MAX) * mass_range + MIN_MASS)*10000; } else { r[idx].x = (float) (rand()/RAND_MAX) * x_width + x_min; r[idx].y = (float) (rand()/RAND_MAX) * y_width + y_min; r[idx].z = (float) (rand()/RAND_MAX) * mass_range + MIN_MASS; } CoM.x += r[idx].z * r[idx].x; CoM.y += r[idx].z * r[idx].y; system_mass += r[idx].z; } CoM.x /= system_mass; CoM.y /= system_mass; for (idx=0; idx<nElem; idx++) { // radius is now the distance of point from Center of Mass (CoM) dist.x = r[idx].x - CoM.x; dist.y = r[idx].y - CoM.y; radius = sqrtf(dist.x*dist.x + dist.y*dist.y); unit_dist.x = dist.x / radius; unit_dist.y = dist.y / radius; tangential_speed = sqrtf(G*system_mass/radius) * 1.1; v[idx].x = unit_dist.y * tangential_speed; v[idx].y = -1*unit_dist.x * tangential_speed; v[idx].z = 0.0f; } break; case SPIRAL_QUAD_GALAXY: CoM = (float2) {0.0f, 0.0f}; system_mass = 0.0; for (idx=0; idx<nElem; idx++) { if (idx == 0) { r[idx].x = x_mid; r[idx].y = y_mid; r[idx].z = ((float) (rand()/RAND_MAX) * mass_range + MIN_MASS)*10000; } else { r[idx].x = (float) (rand()/RAND_MAX) * x_width + x_min; r[idx].y = (float) (rand()/RAND_MAX) * y_width + y_min; r[idx].z = (float) (rand()/RAND_MAX) * mass_range + MIN_MASS; } CoM.x += r[idx].z * r[idx].x; CoM.y += r[idx].z * r[idx].y; system_mass += r[idx].z; } CoM.x 
/= system_mass; CoM.y /= system_mass; for (idx=0; idx<nElem; idx++) { // radius is now the distance of point from Center of Mass (CoM) dist.x = r[idx].x - CoM.x; dist.y = r[idx].y - CoM.y; radius = sqrtf(dist.x*dist.x + dist.y*dist.y); unit_dist.x = dist.x / radius; unit_dist.y = dist.y / radius; tangential_speed = sqrtf(G*system_mass/radius) * 1.1; v[idx].x = unit_dist.y * tangential_speed; v[idx].y = -1*unit_dist.x * tangential_speed; v[idx].z = 0.0f; } break; default: for (idx=0; idx<nElem; idx++) { radius = (float) (rand()/RAND_MAX) * y_width/2; x = (float) (rand()/RAND_MAX) * radius * rand_sign(); y = sqrt(radius*radius - x*x) * rand_sign(); r[idx].x = x_mid + x; r[idx].y = y_mid + y;; r[idx].z = (float) (rand()/RAND_MAX) * mass_range + MIN_MASS; v[idx] = (float3) {0.0f, 0.0f, 0.0f}; } break; } } /*void *init_Acceleration_SMT (void *arg) { // define local variables for convenience unsigned long start, end, len, offset, nElem; nElem = US.nElem; offset = (unsigned long) arg; len = (unsigned long) US.nElem / NUM_CPU_THREADS; start = offset * len; end = start + len; unsigned long i, j; float ax_ip1, ay_ip1, az_ip1; float dx_ip1, dy_ip1, dz_ip1, rDistSquared, MinvDistCubed; float **i_r = &(US.r1); float **o_a = &(US.a1); // calculating NEXT acceleration of each body from the position of every other bodies // ... and NEXT velocity of each body utilizing the next acceleration for (i=start; i<end; i++) { ax_ip1 = 0.0; ay_ip1 = 0.0; az_ip1 = 0.0; for (j=0; j<nElem; j++) { dx_ip1 = *(*i_r + (ND*j+0)) - *(*i_r + (ND*i+0)); dy_ip1 = *(*i_r + (ND*j+1)) - *(*i_r + (ND*i+1)); dz_ip1 = *(*i_r + (ND*j+2)) - *(*i_r + (ND*i+2)); rDistSquared = dx_ip1*dx_ip1 + dy_ip1*dy_ip1 + dz_ip1*dz_ip1 + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); ax_ip1 += dx_ip1 * MinvDistCubed; ay_ip1 += dy_ip1 * MinvDistCubed; az_ip1 += dz_ip1 * MinvDistCubed; } *(*o_a + (ND*i+0)) = G*ax_ip1; *(*o_a + (ND*i+1)) = G*ay_ip1; *(*o_a + (ND*i+2)) = G*az_ip1; } pthread_exit (NULL); }*/ void print_simulationParameters (unsigned long nElem, unsigned long nIter, unsigned int cpu_threads) { printf("\n===== Simulation Parameters =====\n\n"); printf(" Number of Bodies = %ld\n", nElem); printf(" Number of Time Steps = %ld\n", nIter); printf(" Number of CPU Threads = %d\n\n", cpu_threads); printf("=================================\n\n\n"); } void print_deviceProperties (int dev, int driverVersion, int runtimeVersion, hipDeviceProp_t deviceProp) { printf("\n===== Device Properties ======\n\n"); printf(" Device %d: %s\n", dev, deviceProp.name); printf(" CUDA Driver Version / Runtime Version: %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10); printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor); printf(" Number of SMs: %d\n", deviceProp.multiProcessorCount); printf(" Total amount of global memory: %.2f GB (%llu B)\n", (float) deviceProp.totalGlobalMem/pow(1024.0,3), (unsigned long long) deviceProp.totalGlobalMem); printf(" Total amount of constant memory: %4.2f kB\n", deviceProp.totalConstMem/1024.0); printf(" Total amount of shared memory per block: %4.2f kB\n", deviceProp.sharedMemPerBlock/1024.0); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Maximum number of threads per SM: %d\n", 
deviceProp.maxThreadsPerMultiProcessor); printf(" Maximum number of warps per SM: %d\n", deviceProp.maxThreadsPerMultiProcessor/32); printf(" Maximum size of each block dimension: %d x %d x %d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Maximum size of each grid dimension: %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: %lu B\n", deviceProp.memPitch); printf(" Memory Clock Rate (MHz): %.1f\n", deviceProp.memoryClockRate/1e3); printf(" Memory Bus Width (b): %d\n", deviceProp.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %.2f\n\n", 2.0*deviceProp.memoryClockRate*(deviceProp.memoryBusWidth/8)/1e6); } __device__ float2 bodyBodyInteraction (float2 ai, const float3 bi, const float3 bj) { float2 dist; dist.x = bj.x - bi.x; dist.y = bj.y - bi.y; float distSqr = dist.x*dist.x + dist.y*dist.y + SOFTENING; float invDistCube = rsqrtf(distSqr * distSqr * distSqr); float s = bj.z * invDistCube; ai.x += s * dist.x; ai.y += s * dist.y; return ai; } __global__ void initAcceleration (float3 *devA, const float3 *__restrict__ devX, const unsigned nTiles) { unsigned int gtid = blockIdx.x * BLOCK_SIZE + threadIdx.x; __shared__ float3 shPosition3[BLOCK_SIZE]; float3 myPosition3; float2 acc2 = {0.0f, 0.0f}; myPosition3 = devX[gtid]; for (unsigned tile=0; tile<nTiles; tile++) { shPosition3[threadIdx.x] = devX[ tile*BLOCK_SIZE + threadIdx.x ]; __syncthreads(); // Wait for all threads in block to load data // ... into shared memory #pragma unroll (16) for (unsigned j=0; j<BLOCK_SIZE; j++) acc2 = bodyBodyInteraction(acc2, myPosition3, shPosition3[j]); __syncthreads(); // wait for all threads in block to complete their // ... computations to not overwrite sh. mem. } devA[gtid] = (float3) {G*acc2.x, G*acc2.y, 0.0f}; } __device__ float3 calcAcceleration (const float3 *__restrict__ devX, const unsigned nTiles) { unsigned int gtid = blockIdx.x * BLOCK_SIZE + threadIdx.x; __shared__ float3 shPosition3[BLOCK_SIZE]; float3 myPosition3; float2 acc2 = {0.0f, 0.0f}; myPosition3 = devX[gtid]; for (unsigned tile=0; tile<nTiles; tile++) { shPosition3[threadIdx.x] = devX[ tile*BLOCK_SIZE + threadIdx.x ]; __syncthreads(); // Wait for all threads in block to load data // ... into shared memory #pragma unroll (16) for (unsigned j=0; j<BLOCK_SIZE; j++) acc2 = bodyBodyInteraction(acc2, myPosition3, shPosition3[j]); __syncthreads(); // wait for all threads in block to complete their // ... computations to not overwrite sh. mem. 
} float3 acc3 = {G*acc2.x, G*acc2.y, 0.0f}; return acc3; } __global__ void calcIntegration (float3 *devX_ip1, const float3 *__restrict__ devX_i, float3 *devV_i, float3 *devA_i, const unsigned nElem, const unsigned nTiles) { unsigned int gtid = blockIdx.x * blockDim.x + threadIdx.x; if (gtid < nElem) { // if (gtid == 1) // printf("x: %.6f\ty: %.6f\tm: %.6f\n", devX_i[gtid].x, devX_i[gtid].y, devX_i[gtid].z); float3 old_acc = devA_i[gtid]; float3 old_vel = devV_i[gtid]; float3 old_pos = devX_i[gtid]; devX_ip1[gtid].x = old_pos.x + old_vel.x*DT + old_acc.x*DTSQd2; devX_ip1[gtid].y = old_pos.y + old_vel.y*DT + old_acc.y*DTSQd2; float3 new_acc = calcAcceleration (devX_i, nTiles); devV_i [gtid].x = old_vel.x + (old_acc.x + new_acc.x)*DTd2; devV_i [gtid].y = old_vel.y + (old_acc.y + new_acc.y)*DTd2; devA_i [gtid] = new_acc; } } /*void *computeHost_SMT (void *arg) { // define local variables for convenience unsigned long start, end, len, offset, nElem, nIter; nElem = US.nElem; nIter = US.nIter; offset = (unsigned long) arg; len = (unsigned long) nElem / NUM_CPU_THREADS; start = offset * len; end = start + len; unsigned long i, j; float3 pos_i, accel_ip1, dpos_ip1; float rDistSquared, MinvDistCubed; float **i_r, **i_v, **i_a; float **o_r, **o_v, **o_a; for (unsigned long iter=0; iter<nIter; iter++) { // since the computation cannot be done inplace, we constantly need to // swap where the input and output data locations are if (iter % 2 == 0) { i_r = &(US.r1); i_v = &(US.v1); i_a = &(US.a1); o_r = &(US.r2); o_v = &(US.v2); o_a = &(US.a2); } else { i_r = &(US.r2); i_v = &(US.v2); i_a = &(US.a2); o_r = &(US.r1); o_v = &(US.v1); o_a = &(US.a1); } // calculating NEXT position of each body for (i=start; i<end; i++) { if (i % 100 == 0) { *(*o_r + (ND*i)) = *(*i_r + (ND*i)); *(*o_r + (ND*i+1)) = *(*i_r + (ND*i+1)); *(*o_r + (ND*i+2)) = *(*i_r + (ND*i+2)); } else { *(*o_r + (ND*i)) = *(*i_r + (ND*i)) + *(*i_v + (ND*i))*DT + *(*i_a + (ND*i)) *DTSQd2; *(*o_r + (ND*i+1)) = *(*i_r + (ND*i+1)) + *(*i_v + (ND*i+1))*DT + *(*i_a + (ND*i+1))*DTSQd2; *(*o_r + (ND*i+2)) = *(*i_r + (ND*i+2)) + *(*i_v + (ND*i+2))*DT + *(*i_a + (ND*i+2))*DTSQd2; } } // position computation done pthread_mutex_lock (&count_mutex); count++; if (count == NUM_CPU_THREADS*(2*iter+1)) { pthread_cond_broadcast (&count_condition); //printf("Broadcasting by tid=%ld\n", offset); } else { do { pthread_cond_wait (&count_condition, &count_mutex); //printf("Condition Broadcast received by tid=%ld\n", offset); } while (count < NUM_CPU_THREADS*(2*iter+1)); } pthread_mutex_unlock (&count_mutex); // calculating NEXT acceleration of each body from the position of every other bodies // ... 
and NEXT velocity of each body utilizing the next acceleration for (i=start; i<end; i++) { pos_i.x = *(*o_r + (ND*i+0)); pos_i.y = *(*o_r + (ND*i+1)); pos_i.z = *(*o_r + (ND*i+2)); accel_ip1 = (float3) {.x=0.0f, .y=0.0f, .z=0.0f}; // unrolling this loop 8x for ~2% performance improvement j = 0; while (j < nElem) { dpos_ip1.x = *(*o_r + (ND*j+0)) - pos_i.x; dpos_ip1.y = *(*o_r + (ND*j+1)) - pos_i.y; dpos_ip1.z = *(*o_r + (ND*j+2)) - pos_i.z; rDistSquared = dpos_ip1.x*dpos_ip1.x + dpos_ip1.y*dpos_ip1.y + dpos_ip1.z*dpos_ip1.z + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); accel_ip1.x += dpos_ip1.x * MinvDistCubed; accel_ip1.y += dpos_ip1.y * MinvDistCubed; accel_ip1.z += dpos_ip1.z * MinvDistCubed; j++; // unroll #1 dpos_ip1.x = *(*o_r + (ND*j+0)) - pos_i.x; dpos_ip1.y = *(*o_r + (ND*j+1)) - pos_i.y; dpos_ip1.z = *(*o_r + (ND*j+2)) - pos_i.z; rDistSquared = dpos_ip1.x*dpos_ip1.x + dpos_ip1.y*dpos_ip1.y + dpos_ip1.z*dpos_ip1.z + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); accel_ip1.x += dpos_ip1.x * MinvDistCubed; accel_ip1.y += dpos_ip1.y * MinvDistCubed; accel_ip1.z += dpos_ip1.z * MinvDistCubed; j++; // unroll #2 dpos_ip1.x = *(*o_r + (ND*j+0)) - pos_i.x; dpos_ip1.y = *(*o_r + (ND*j+1)) - pos_i.y; dpos_ip1.z = *(*o_r + (ND*j+2)) - pos_i.z; rDistSquared = dpos_ip1.x*dpos_ip1.x + dpos_ip1.y*dpos_ip1.y + dpos_ip1.z*dpos_ip1.z + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); accel_ip1.x += dpos_ip1.x * MinvDistCubed; accel_ip1.y += dpos_ip1.y * MinvDistCubed; accel_ip1.z += dpos_ip1.z * MinvDistCubed; j++; // unroll #3 dpos_ip1.x = *(*o_r + (ND*j+0)) - pos_i.x; dpos_ip1.y = *(*o_r + (ND*j+1)) - pos_i.y; dpos_ip1.z = *(*o_r + (ND*j+2)) - pos_i.z; rDistSquared = dpos_ip1.x*dpos_ip1.x + dpos_ip1.y*dpos_ip1.y + dpos_ip1.z*dpos_ip1.z + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); accel_ip1.x += dpos_ip1.x * MinvDistCubed; accel_ip1.y += dpos_ip1.y * MinvDistCubed; accel_ip1.z += dpos_ip1.z * MinvDistCubed; j++; // unroll #4 dpos_ip1.x = *(*o_r + (ND*j+0)) - pos_i.x; dpos_ip1.y = *(*o_r + (ND*j+1)) - pos_i.y; dpos_ip1.z = *(*o_r + (ND*j+2)) - pos_i.z; rDistSquared = dpos_ip1.x*dpos_ip1.x + dpos_ip1.y*dpos_ip1.y + dpos_ip1.z*dpos_ip1.z + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); accel_ip1.x += dpos_ip1.x * MinvDistCubed; accel_ip1.y += dpos_ip1.y * MinvDistCubed; accel_ip1.z += dpos_ip1.z * MinvDistCubed; j++; // unroll #5 dpos_ip1.x = *(*o_r + (ND*j+0)) - pos_i.x; dpos_ip1.y = *(*o_r + (ND*j+1)) - pos_i.y; dpos_ip1.z = *(*o_r + (ND*j+2)) - pos_i.z; rDistSquared = dpos_ip1.x*dpos_ip1.x + dpos_ip1.y*dpos_ip1.y + dpos_ip1.z*dpos_ip1.z + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); accel_ip1.x += dpos_ip1.x * MinvDistCubed; accel_ip1.y += dpos_ip1.y * MinvDistCubed; accel_ip1.z += dpos_ip1.z * MinvDistCubed; j++; // unroll #6 dpos_ip1.x = *(*o_r + (ND*j+0)) - pos_i.x; dpos_ip1.y = *(*o_r + (ND*j+1)) - pos_i.y; dpos_ip1.z = *(*o_r + (ND*j+2)) - pos_i.z; rDistSquared = dpos_ip1.x*dpos_ip1.x + dpos_ip1.y*dpos_ip1.y + dpos_ip1.z*dpos_ip1.z + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); accel_ip1.x += dpos_ip1.x * MinvDistCubed; accel_ip1.y += dpos_ip1.y * MinvDistCubed; accel_ip1.z += dpos_ip1.z * MinvDistCubed; j++; // unroll #7 dpos_ip1.x = *(*o_r + (ND*j+0)) - pos_i.x; dpos_ip1.y = *(*o_r + (ND*j+1)) - pos_i.y; dpos_ip1.z = *(*o_r + (ND*j+2)) - 
pos_i.z; rDistSquared = dpos_ip1.x*dpos_ip1.x + dpos_ip1.y*dpos_ip1.y + dpos_ip1.z*dpos_ip1.z + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); accel_ip1.x += dpos_ip1.x * MinvDistCubed; accel_ip1.y += dpos_ip1.y * MinvDistCubed; accel_ip1.z += dpos_ip1.z * MinvDistCubed; j++; // unroll #8 } *(*o_a + (ND*i+0)) = G*accel_ip1.x; *(*o_a + (ND*i+1)) = G*accel_ip1.y; *(*o_a + (ND*i+2)) = G*accel_ip1.z; *(*o_v + (ND*i+0)) = *(*i_v + (ND*i+0)) + (*(*i_a + (ND*i+0))+accel_ip1.x)*DTd2; *(*o_v + (ND*i+1)) = *(*i_v + (ND*i+1)) + (*(*i_a + (ND*i+1))+accel_ip1.y)*DTd2; *(*o_v + (ND*i+2)) = *(*i_v + (ND*i+2)) + (*(*i_a + (ND*i+2))+accel_ip1.z)*DTd2; } // computation completed on thread. Acquire mutex to increment count variable... pthread_mutex_lock (&count_mutex); count++; if (count == NUM_CPU_THREADS*(2*iter+2)) { // writing to file for (unsigned int idx=0; idx<nElem; idx++) { fprintf(destFile, "%f,%f,%f\n", *(*o_r+ND*idx+0), *(*o_r+ND*idx+1), *(*o_r+ND*idx+2)); } pthread_cond_broadcast (&count_condition); //printf("Broadcasting by tid=%ld\n", offset); } else { do { pthread_cond_wait (&count_condition, &count_mutex); //printf("Condition Broadcast received by tid=%ld\n", offset); } while (count < NUM_CPU_THREADS*(2*iter+2)); } pthread_mutex_unlock (&count_mutex); if (offset == 1) { printf("%ld:\tx: %.6f\ty: %.6f\tz: %.6f\n", iter, *(*o_r + (ND*offset)), *(*o_r + (ND*offset)+1), *(*o_r + (ND*offset)+2)); } //if (offset == 0) // print_BodyStats (US.m, *o_r, *o_v, *o_a); } pthread_exit (NULL); }*/
dc7662f6f3770c4222241ee06104924d2d45360b.cu
#include "nbody_helper2.h" // time stamp function in seconds double getTimeStamp() { struct timeval tv; gettimeofday (&tv, NULL); return (double) tv.tv_usec/1000000 + tv.tv_sec; } // HELPER FUNCTIONS inline float3 scalevec (float3 &v0, float scalar) { float3 rt = v0; rt.x *= scalar; rt.y *= scalar; return rt; } inline float dot (float2 v0, float2 v1) { return v0.x*v1.x + v0.y*v1.y; } inline float normalize (float2 v0) { float dist = sqrtf(dot(v0, v0)); if (dist > 1e-6) { v0.x /= dist; v0.y /= dist; } else { v0.x *= 1e6; v0.y *= 1e6; } return dist; } inline float3 cross (float3 v0, float3 v1) { float3 v2; v2.x = v0.y*v1.z - v0.z*v1.y; v2.y = v0.z*v1.x - v0.x*v1.z; v2.z = v0.z*v1.y - v0.y*v1.x; return v2; } inline float rand_sign () { return (rand()-RAND_MAX) >= 0 ? 1.0 : -1.0; } // void print_BodyStats (const float3 *r, const float3 *v, const float3 *a, const unsigned long nElem) // { // printf("\n"); // unsigned long idx; // // print body number // for (idx=0; idx<nElem; idx++) { // if (idx == nElem-1) // printf("Mass %ld\n", idx); // else // printf("Mass %ld\t", idx); // } // // print Mass // for (idx=0; idx<nElem; idx++) { // if (idx == nElem-1) // printf("%.2f\n", r[idx].z); // else // printf("%.2f\t", m[idx]); // } // // print position // for (dim=0; dim<ND; dim++) { // for (unsigned long idx=0; idx<nElem; idx++) { // if (idx == nElem-1) // printf("%.2f\n", *(r+idx*(ND+1)+dim)); // else // printf("%.2f\t", *(r+idx*(ND+1)+dim)); // } // } // // print velocity // for (dim=0; dim<ND; dim++) { // for (unsigned long idx=0; idx<nElem; idx++) { // if (idx == nElem-1) // printf("%.2f\n", v[ND*idx + dim]); // else // printf("%.2f\t", v[ND*idx + dim]); // } // } // // print acceleration // for (dim=0; dim<ND; dim++) { // for (unsigned long idx=0; idx<nElem; idx++) { // if (idx == nElem-1) // printf("%.2f\n", a[ND*idx + dim]); // else // printf("%.2f\t", a[ND*idx + dim]); // } // } // } void init_MassPositionVelocity (float3 *r, float3 *v, const unsigned long nElem, const unsigned int config) { // generate different seed for pseudo-random number generator // time_t t; // srand ((unsigned int) time(&t)); srand ((unsigned int) 1000); // populating mass, position, & velocity arrays unsigned long idx; float mass_range = MAX_MASS - MIN_MASS; float x_width = 300.0; float y_width = 300.0; float x_mid = X_RES/2; //float x_max = (X_RES + x_width)/2; float x_min = (X_RES - x_width)/2; float y_mid = Y_RES/2; //float y_max = (Y_RES + y_width)/2; float y_min = (Y_RES - y_width)/2; float x, y, radius, angle, system_mass, speed_factor, tangential_speed; float shell_radius, shell_thickness, radial_velocity; float2 CoM, dist, unit_dist; switch (config) { case RANDOM_SQUARE_NO_VEL: printf("Initializing positions and mass\n"); for (idx=0; idx<nElem; idx++) { r[idx].x = (float) ((double) rand()/RAND_MAX) * x_width + x_min; r[idx].y = (float) ((double) rand()/RAND_MAX) * y_width + y_min; r[idx].z = (float) ((double) rand()/RAND_MAX) * mass_range + MIN_MASS; v[idx] = (float3) {0.0f, 0.0f, 0.0f}; // printf("Body %ld\t x: %.6f\ty: %.6f\t m: %.6f\n", // idx, r[idx].x, r[idx].y, r[idx].z); } break; case RANDOM_CIRCLE_NO_VEL: for (idx=0; idx<nElem; idx++) { radius = (float) (rand()/RAND_MAX) * y_width/2; x = (float) (rand()/RAND_MAX) * radius * rand_sign(); y = sqrt(radius*radius - x*x) * rand_sign(); r[idx].x = x_mid + x; r[idx].y = y_mid + y;; r[idx].z = (float) (rand()/RAND_MAX) * mass_range + MIN_MASS; v[idx] = (float3) {0.0f, 0.0f, 0.0f}; } break; case EXPAND_SHELL: shell_radius = y_width/2; shell_thickness = 
0.25*shell_radius; CoM = (float2) {0.0f, 0.0f}; system_mass = 0.0; speed_factor=0.1; for (idx=0; idx<nElem; idx++) { // radius is the distance of point from center of window radius = (float) (rand()/RAND_MAX)*shell_thickness - shell_thickness/2 + shell_radius; x = (float) (rand()/RAND_MAX) * radius * rand_sign(); y = sqrt(radius*radius - x*x) * rand_sign(); r[idx].x = x_mid + x; r[idx].y = y_mid + y;; r[idx].z = (float) (rand()/RAND_MAX) * mass_range + MIN_MASS; CoM.x += r[idx].z * r[idx].x; CoM.y += r[idx].z * r[idx].y; system_mass += r[idx].z; } CoM.x /= system_mass; CoM.y /= system_mass; for (idx=0; idx<nElem; idx++) { // radius is now the distance of point from Center of Mass (CoM) dist.x = r[idx].x - CoM.x; dist.y = r[idx].y - CoM.y; angle = (float) atan(dist.y/dist.x); radius = sqrtf(dist.x*dist.x + dist.y*dist.y); radial_velocity = speed_factor * sqrtf(2*G*system_mass/radius); v[idx].x = radial_velocity * (float) cos(angle); v[idx].y = radial_velocity * (float) sin(angle); v[idx].z = 0.0f; } break; case SPIRAL_SINGLE_GALAXY: CoM = (float2) {0.0f, 0.0f}; system_mass = 0.0; for (idx=0; idx<nElem; idx++) { if (idx == 0) { r[idx].x = x_mid; r[idx].y = y_mid; r[idx].z = ((float) (rand()/RAND_MAX) * mass_range + MIN_MASS)*10000; } else { r[idx].x = (float) (rand()/RAND_MAX) * x_width + x_min; r[idx].y = (float) (rand()/RAND_MAX) * y_width + y_min; r[idx].z = (float) (rand()/RAND_MAX) * mass_range + MIN_MASS; } CoM.x += r[idx].z * r[idx].x; CoM.y += r[idx].z * r[idx].y; system_mass += r[idx].z; } CoM.x /= system_mass; CoM.y /= system_mass; for (idx=0; idx<nElem; idx++) { // radius is now the distance of point from Center of Mass (CoM) dist.x = r[idx].x - CoM.x; dist.y = r[idx].y - CoM.y; radius = sqrtf(dist.x*dist.x + dist.y*dist.y); unit_dist.x = dist.x / radius; unit_dist.y = dist.y / radius; tangential_speed = sqrtf(G*system_mass/radius) * 1.1; v[idx].x = unit_dist.y * tangential_speed; v[idx].y = -1*unit_dist.x * tangential_speed; v[idx].z = 0.0f; } break; case SPIRAL_DOUBLE_GALAXY: CoM = (float2) {0.0f, 0.0f}; system_mass = 0.0; for (idx=0; idx<nElem; idx++) { if (idx == 0) { r[idx].x = x_mid; r[idx].y = y_mid; r[idx].z = ((float) (rand()/RAND_MAX) * mass_range + MIN_MASS)*10000; } else { r[idx].x = (float) (rand()/RAND_MAX) * x_width + x_min; r[idx].y = (float) (rand()/RAND_MAX) * y_width + y_min; r[idx].z = (float) (rand()/RAND_MAX) * mass_range + MIN_MASS; } CoM.x += r[idx].z * r[idx].x; CoM.y += r[idx].z * r[idx].y; system_mass += r[idx].z; } CoM.x /= system_mass; CoM.y /= system_mass; for (idx=0; idx<nElem; idx++) { // radius is now the distance of point from Center of Mass (CoM) dist.x = r[idx].x - CoM.x; dist.y = r[idx].y - CoM.y; radius = sqrtf(dist.x*dist.x + dist.y*dist.y); unit_dist.x = dist.x / radius; unit_dist.y = dist.y / radius; tangential_speed = sqrtf(G*system_mass/radius) * 1.1; v[idx].x = unit_dist.y * tangential_speed; v[idx].y = -1*unit_dist.x * tangential_speed; v[idx].z = 0.0f; } break; case SPIRAL_QUAD_GALAXY: CoM = (float2) {0.0f, 0.0f}; system_mass = 0.0; for (idx=0; idx<nElem; idx++) { if (idx == 0) { r[idx].x = x_mid; r[idx].y = y_mid; r[idx].z = ((float) (rand()/RAND_MAX) * mass_range + MIN_MASS)*10000; } else { r[idx].x = (float) (rand()/RAND_MAX) * x_width + x_min; r[idx].y = (float) (rand()/RAND_MAX) * y_width + y_min; r[idx].z = (float) (rand()/RAND_MAX) * mass_range + MIN_MASS; } CoM.x += r[idx].z * r[idx].x; CoM.y += r[idx].z * r[idx].y; system_mass += r[idx].z; } CoM.x /= system_mass; CoM.y /= system_mass; for (idx=0; idx<nElem; idx++) { // radius 
is now the distance of point from Center of Mass (CoM) dist.x = r[idx].x - CoM.x; dist.y = r[idx].y - CoM.y; radius = sqrtf(dist.x*dist.x + dist.y*dist.y); unit_dist.x = dist.x / radius; unit_dist.y = dist.y / radius; tangential_speed = sqrtf(G*system_mass/radius) * 1.1; v[idx].x = unit_dist.y * tangential_speed; v[idx].y = -1*unit_dist.x * tangential_speed; v[idx].z = 0.0f; } break; default: for (idx=0; idx<nElem; idx++) { radius = (float) (rand()/RAND_MAX) * y_width/2; x = (float) (rand()/RAND_MAX) * radius * rand_sign(); y = sqrt(radius*radius - x*x) * rand_sign(); r[idx].x = x_mid + x; r[idx].y = y_mid + y;; r[idx].z = (float) (rand()/RAND_MAX) * mass_range + MIN_MASS; v[idx] = (float3) {0.0f, 0.0f, 0.0f}; } break; } } /*void *init_Acceleration_SMT (void *arg) { // define local variables for convenience unsigned long start, end, len, offset, nElem; nElem = US.nElem; offset = (unsigned long) arg; len = (unsigned long) US.nElem / NUM_CPU_THREADS; start = offset * len; end = start + len; unsigned long i, j; float ax_ip1, ay_ip1, az_ip1; float dx_ip1, dy_ip1, dz_ip1, rDistSquared, MinvDistCubed; float **i_r = &(US.r1); float **o_a = &(US.a1); // calculating NEXT acceleration of each body from the position of every other bodies // ... and NEXT velocity of each body utilizing the next acceleration for (i=start; i<end; i++) { ax_ip1 = 0.0; ay_ip1 = 0.0; az_ip1 = 0.0; for (j=0; j<nElem; j++) { dx_ip1 = *(*i_r + (ND*j+0)) - *(*i_r + (ND*i+0)); dy_ip1 = *(*i_r + (ND*j+1)) - *(*i_r + (ND*i+1)); dz_ip1 = *(*i_r + (ND*j+2)) - *(*i_r + (ND*i+2)); rDistSquared = dx_ip1*dx_ip1 + dy_ip1*dy_ip1 + dz_ip1*dz_ip1 + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); ax_ip1 += dx_ip1 * MinvDistCubed; ay_ip1 += dy_ip1 * MinvDistCubed; az_ip1 += dz_ip1 * MinvDistCubed; } *(*o_a + (ND*i+0)) = G*ax_ip1; *(*o_a + (ND*i+1)) = G*ay_ip1; *(*o_a + (ND*i+2)) = G*az_ip1; } pthread_exit (NULL); }*/ void print_simulationParameters (unsigned long nElem, unsigned long nIter, unsigned int cpu_threads) { printf("\n===== Simulation Parameters =====\n\n"); printf(" Number of Bodies = %ld\n", nElem); printf(" Number of Time Steps = %ld\n", nIter); printf(" Number of CPU Threads = %d\n\n", cpu_threads); printf("=================================\n\n\n"); } void print_deviceProperties (int dev, int driverVersion, int runtimeVersion, cudaDeviceProp deviceProp) { printf("\n===== Device Properties ======\n\n"); printf(" Device %d: %s\n", dev, deviceProp.name); printf(" CUDA Driver Version / Runtime Version: %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10); printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor); printf(" Number of SMs: %d\n", deviceProp.multiProcessorCount); printf(" Total amount of global memory: %.2f GB (%llu B)\n", (float) deviceProp.totalGlobalMem/pow(1024.0,3), (unsigned long long) deviceProp.totalGlobalMem); printf(" Total amount of constant memory: %4.2f kB\n", deviceProp.totalConstMem/1024.0); printf(" Total amount of shared memory per block: %4.2f kB\n", deviceProp.sharedMemPerBlock/1024.0); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Maximum number of threads per SM: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf(" Maximum number of warps per SM: %d\n", 
deviceProp.maxThreadsPerMultiProcessor/32); printf(" Maximum size of each block dimension: %d x %d x %d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Maximum size of each grid dimension: %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: %lu B\n", deviceProp.memPitch); printf(" Memory Clock Rate (MHz): %.1f\n", deviceProp.memoryClockRate/1e3); printf(" Memory Bus Width (b): %d\n", deviceProp.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %.2f\n\n", 2.0*deviceProp.memoryClockRate*(deviceProp.memoryBusWidth/8)/1e6); } __device__ float2 bodyBodyInteraction (float2 ai, const float3 bi, const float3 bj) { float2 dist; dist.x = bj.x - bi.x; dist.y = bj.y - bi.y; float distSqr = dist.x*dist.x + dist.y*dist.y + SOFTENING; float invDistCube = rsqrtf(distSqr * distSqr * distSqr); float s = bj.z * invDistCube; ai.x += s * dist.x; ai.y += s * dist.y; return ai; } __global__ void initAcceleration (float3 *devA, const float3 *__restrict__ devX, const unsigned nTiles) { unsigned int gtid = blockIdx.x * BLOCK_SIZE + threadIdx.x; __shared__ float3 shPosition3[BLOCK_SIZE]; float3 myPosition3; float2 acc2 = {0.0f, 0.0f}; myPosition3 = devX[gtid]; for (unsigned tile=0; tile<nTiles; tile++) { shPosition3[threadIdx.x] = devX[ tile*BLOCK_SIZE + threadIdx.x ]; __syncthreads(); // Wait for all threads in block to load data // ... into shared memory #pragma unroll (16) for (unsigned j=0; j<BLOCK_SIZE; j++) acc2 = bodyBodyInteraction(acc2, myPosition3, shPosition3[j]); __syncthreads(); // wait for all threads in block to complete their // ... computations to not overwrite sh. mem. } devA[gtid] = (float3) {G*acc2.x, G*acc2.y, 0.0f}; } __device__ float3 calcAcceleration (const float3 *__restrict__ devX, const unsigned nTiles) { unsigned int gtid = blockIdx.x * BLOCK_SIZE + threadIdx.x; __shared__ float3 shPosition3[BLOCK_SIZE]; float3 myPosition3; float2 acc2 = {0.0f, 0.0f}; myPosition3 = devX[gtid]; for (unsigned tile=0; tile<nTiles; tile++) { shPosition3[threadIdx.x] = devX[ tile*BLOCK_SIZE + threadIdx.x ]; __syncthreads(); // Wait for all threads in block to load data // ... into shared memory #pragma unroll (16) for (unsigned j=0; j<BLOCK_SIZE; j++) acc2 = bodyBodyInteraction(acc2, myPosition3, shPosition3[j]); __syncthreads(); // wait for all threads in block to complete their // ... computations to not overwrite sh. mem. 
} float3 acc3 = {G*acc2.x, G*acc2.y, 0.0f}; return acc3; } __global__ void calcIntegration (float3 *devX_ip1, const float3 *__restrict__ devX_i, float3 *devV_i, float3 *devA_i, const unsigned nElem, const unsigned nTiles) { unsigned int gtid = blockIdx.x * blockDim.x + threadIdx.x; if (gtid < nElem) { // if (gtid == 1) // printf("x: %.6f\ty: %.6f\tm: %.6f\n", devX_i[gtid].x, devX_i[gtid].y, devX_i[gtid].z); float3 old_acc = devA_i[gtid]; float3 old_vel = devV_i[gtid]; float3 old_pos = devX_i[gtid]; devX_ip1[gtid].x = old_pos.x + old_vel.x*DT + old_acc.x*DTSQd2; devX_ip1[gtid].y = old_pos.y + old_vel.y*DT + old_acc.y*DTSQd2; float3 new_acc = calcAcceleration (devX_i, nTiles); devV_i [gtid].x = old_vel.x + (old_acc.x + new_acc.x)*DTd2; devV_i [gtid].y = old_vel.y + (old_acc.y + new_acc.y)*DTd2; devA_i [gtid] = new_acc; } } /*void *computeHost_SMT (void *arg) { // define local variables for convenience unsigned long start, end, len, offset, nElem, nIter; nElem = US.nElem; nIter = US.nIter; offset = (unsigned long) arg; len = (unsigned long) nElem / NUM_CPU_THREADS; start = offset * len; end = start + len; unsigned long i, j; float3 pos_i, accel_ip1, dpos_ip1; float rDistSquared, MinvDistCubed; float **i_r, **i_v, **i_a; float **o_r, **o_v, **o_a; for (unsigned long iter=0; iter<nIter; iter++) { // since the computation cannot be done inplace, we constantly need to // swap where the input and output data locations are if (iter % 2 == 0) { i_r = &(US.r1); i_v = &(US.v1); i_a = &(US.a1); o_r = &(US.r2); o_v = &(US.v2); o_a = &(US.a2); } else { i_r = &(US.r2); i_v = &(US.v2); i_a = &(US.a2); o_r = &(US.r1); o_v = &(US.v1); o_a = &(US.a1); } // calculating NEXT position of each body for (i=start; i<end; i++) { if (i % 100 == 0) { *(*o_r + (ND*i)) = *(*i_r + (ND*i)); *(*o_r + (ND*i+1)) = *(*i_r + (ND*i+1)); *(*o_r + (ND*i+2)) = *(*i_r + (ND*i+2)); } else { *(*o_r + (ND*i)) = *(*i_r + (ND*i)) + *(*i_v + (ND*i))*DT + *(*i_a + (ND*i)) *DTSQd2; *(*o_r + (ND*i+1)) = *(*i_r + (ND*i+1)) + *(*i_v + (ND*i+1))*DT + *(*i_a + (ND*i+1))*DTSQd2; *(*o_r + (ND*i+2)) = *(*i_r + (ND*i+2)) + *(*i_v + (ND*i+2))*DT + *(*i_a + (ND*i+2))*DTSQd2; } } // position computation done pthread_mutex_lock (&count_mutex); count++; if (count == NUM_CPU_THREADS*(2*iter+1)) { pthread_cond_broadcast (&count_condition); //printf("Broadcasting by tid=%ld\n", offset); } else { do { pthread_cond_wait (&count_condition, &count_mutex); //printf("Condition Broadcast received by tid=%ld\n", offset); } while (count < NUM_CPU_THREADS*(2*iter+1)); } pthread_mutex_unlock (&count_mutex); // calculating NEXT acceleration of each body from the position of every other bodies // ... 
and NEXT velocity of each body utilizing the next acceleration for (i=start; i<end; i++) { pos_i.x = *(*o_r + (ND*i+0)); pos_i.y = *(*o_r + (ND*i+1)); pos_i.z = *(*o_r + (ND*i+2)); accel_ip1 = (float3) {.x=0.0f, .y=0.0f, .z=0.0f}; // unrolling this loop 8x for ~2% performance improvement j = 0; while (j < nElem) { dpos_ip1.x = *(*o_r + (ND*j+0)) - pos_i.x; dpos_ip1.y = *(*o_r + (ND*j+1)) - pos_i.y; dpos_ip1.z = *(*o_r + (ND*j+2)) - pos_i.z; rDistSquared = dpos_ip1.x*dpos_ip1.x + dpos_ip1.y*dpos_ip1.y + dpos_ip1.z*dpos_ip1.z + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); accel_ip1.x += dpos_ip1.x * MinvDistCubed; accel_ip1.y += dpos_ip1.y * MinvDistCubed; accel_ip1.z += dpos_ip1.z * MinvDistCubed; j++; // unroll #1 dpos_ip1.x = *(*o_r + (ND*j+0)) - pos_i.x; dpos_ip1.y = *(*o_r + (ND*j+1)) - pos_i.y; dpos_ip1.z = *(*o_r + (ND*j+2)) - pos_i.z; rDistSquared = dpos_ip1.x*dpos_ip1.x + dpos_ip1.y*dpos_ip1.y + dpos_ip1.z*dpos_ip1.z + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); accel_ip1.x += dpos_ip1.x * MinvDistCubed; accel_ip1.y += dpos_ip1.y * MinvDistCubed; accel_ip1.z += dpos_ip1.z * MinvDistCubed; j++; // unroll #2 dpos_ip1.x = *(*o_r + (ND*j+0)) - pos_i.x; dpos_ip1.y = *(*o_r + (ND*j+1)) - pos_i.y; dpos_ip1.z = *(*o_r + (ND*j+2)) - pos_i.z; rDistSquared = dpos_ip1.x*dpos_ip1.x + dpos_ip1.y*dpos_ip1.y + dpos_ip1.z*dpos_ip1.z + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); accel_ip1.x += dpos_ip1.x * MinvDistCubed; accel_ip1.y += dpos_ip1.y * MinvDistCubed; accel_ip1.z += dpos_ip1.z * MinvDistCubed; j++; // unroll #3 dpos_ip1.x = *(*o_r + (ND*j+0)) - pos_i.x; dpos_ip1.y = *(*o_r + (ND*j+1)) - pos_i.y; dpos_ip1.z = *(*o_r + (ND*j+2)) - pos_i.z; rDistSquared = dpos_ip1.x*dpos_ip1.x + dpos_ip1.y*dpos_ip1.y + dpos_ip1.z*dpos_ip1.z + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); accel_ip1.x += dpos_ip1.x * MinvDistCubed; accel_ip1.y += dpos_ip1.y * MinvDistCubed; accel_ip1.z += dpos_ip1.z * MinvDistCubed; j++; // unroll #4 dpos_ip1.x = *(*o_r + (ND*j+0)) - pos_i.x; dpos_ip1.y = *(*o_r + (ND*j+1)) - pos_i.y; dpos_ip1.z = *(*o_r + (ND*j+2)) - pos_i.z; rDistSquared = dpos_ip1.x*dpos_ip1.x + dpos_ip1.y*dpos_ip1.y + dpos_ip1.z*dpos_ip1.z + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); accel_ip1.x += dpos_ip1.x * MinvDistCubed; accel_ip1.y += dpos_ip1.y * MinvDistCubed; accel_ip1.z += dpos_ip1.z * MinvDistCubed; j++; // unroll #5 dpos_ip1.x = *(*o_r + (ND*j+0)) - pos_i.x; dpos_ip1.y = *(*o_r + (ND*j+1)) - pos_i.y; dpos_ip1.z = *(*o_r + (ND*j+2)) - pos_i.z; rDistSquared = dpos_ip1.x*dpos_ip1.x + dpos_ip1.y*dpos_ip1.y + dpos_ip1.z*dpos_ip1.z + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); accel_ip1.x += dpos_ip1.x * MinvDistCubed; accel_ip1.y += dpos_ip1.y * MinvDistCubed; accel_ip1.z += dpos_ip1.z * MinvDistCubed; j++; // unroll #6 dpos_ip1.x = *(*o_r + (ND*j+0)) - pos_i.x; dpos_ip1.y = *(*o_r + (ND*j+1)) - pos_i.y; dpos_ip1.z = *(*o_r + (ND*j+2)) - pos_i.z; rDistSquared = dpos_ip1.x*dpos_ip1.x + dpos_ip1.y*dpos_ip1.y + dpos_ip1.z*dpos_ip1.z + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); accel_ip1.x += dpos_ip1.x * MinvDistCubed; accel_ip1.y += dpos_ip1.y * MinvDistCubed; accel_ip1.z += dpos_ip1.z * MinvDistCubed; j++; // unroll #7 dpos_ip1.x = *(*o_r + (ND*j+0)) - pos_i.x; dpos_ip1.y = *(*o_r + (ND*j+1)) - pos_i.y; dpos_ip1.z = *(*o_r + (ND*j+2)) - 
pos_i.z; rDistSquared = dpos_ip1.x*dpos_ip1.x + dpos_ip1.y*dpos_ip1.y + dpos_ip1.z*dpos_ip1.z + SOFTENING; MinvDistCubed = US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared); accel_ip1.x += dpos_ip1.x * MinvDistCubed; accel_ip1.y += dpos_ip1.y * MinvDistCubed; accel_ip1.z += dpos_ip1.z * MinvDistCubed; j++; // unroll #8 } *(*o_a + (ND*i+0)) = G*accel_ip1.x; *(*o_a + (ND*i+1)) = G*accel_ip1.y; *(*o_a + (ND*i+2)) = G*accel_ip1.z; *(*o_v + (ND*i+0)) = *(*i_v + (ND*i+0)) + (*(*i_a + (ND*i+0))+accel_ip1.x)*DTd2; *(*o_v + (ND*i+1)) = *(*i_v + (ND*i+1)) + (*(*i_a + (ND*i+1))+accel_ip1.y)*DTd2; *(*o_v + (ND*i+2)) = *(*i_v + (ND*i+2)) + (*(*i_a + (ND*i+2))+accel_ip1.z)*DTd2; } // computation completed on thread. Acquire mutex to increment count variable... pthread_mutex_lock (&count_mutex); count++; if (count == NUM_CPU_THREADS*(2*iter+2)) { // writing to file for (unsigned int idx=0; idx<nElem; idx++) { fprintf(destFile, "%f,%f,%f\n", *(*o_r+ND*idx+0), *(*o_r+ND*idx+1), *(*o_r+ND*idx+2)); } pthread_cond_broadcast (&count_condition); //printf("Broadcasting by tid=%ld\n", offset); } else { do { pthread_cond_wait (&count_condition, &count_mutex); //printf("Condition Broadcast received by tid=%ld\n", offset); } while (count < NUM_CPU_THREADS*(2*iter+2)); } pthread_mutex_unlock (&count_mutex); if (offset == 1) { printf("%ld:\tx: %.6f\ty: %.6f\tz: %.6f\n", iter, *(*o_r + (ND*offset)), *(*o_r + (ND*offset)+1), *(*o_r + (ND*offset)+2)); } //if (offset == 0) // print_BodyStats (US.m, *o_r, *o_v, *o_a); } pthread_exit (NULL); }*/
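The N-body file above defines the tiled O(N^2) kernels (bodyBodyInteraction, initAcceleration, calcIntegration), but the host loop that drives them falls outside this excerpt. Below is a minimal driver sketch, assuming the body count is already padded to a multiple of BLOCK_SIZE (initAcceleration has no bounds check) and that positions ping-pong between two device buffers; run_nbody_gpu, hostX and hostV are hypothetical names introduced here, not part of the original file.

// Hypothetical host-side driver for the tiled velocity-Verlet kernels above.
// Positions alternate between devX[0] and devX[1] because calcIntegration reads
// x(i) while writing x(i+1), so the step cannot run in place.
void run_nbody_gpu(float3 *hostX, float3 *hostV, unsigned nElem, unsigned nIter)
{
    const unsigned nTiles = nElem / BLOCK_SIZE;   // assumes nElem % BLOCK_SIZE == 0
    const dim3 block(BLOCK_SIZE);
    const dim3 grid(nTiles);
    const size_t bytes = nElem * sizeof(float3);

    float3 *devX[2], *devV, *devA;
    cudaMalloc(&devX[0], bytes);
    cudaMalloc(&devX[1], bytes);
    cudaMalloc(&devV, bytes);
    cudaMalloc(&devA, bytes);
    cudaMemcpy(devX[0], hostX, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devV, hostV, bytes, cudaMemcpyHostToDevice);

    // Seed a(0) from the initial positions, then leapfrog between the two buffers.
    initAcceleration<<<grid, block>>>(devA, devX[0], nTiles);
    for (unsigned iter = 0; iter < nIter; iter++) {
        calcIntegration<<<grid, block>>>(devX[(iter + 1) % 2], devX[iter % 2],
                                         devV, devA, nElem, nTiles);
    }
    cudaDeviceSynchronize();

    cudaMemcpy(hostX, devX[nIter % 2], bytes, cudaMemcpyDeviceToHost);
    cudaFree(devX[0]); cudaFree(devX[1]); cudaFree(devV); cudaFree(devA);
}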
c47cc24fcb3d04ee2cbce1f6501fb8a16d813aac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef LU_INVERSE_PLAIN_CU #define LU_INVERSE_PLAIN_CU #include "hamc_common.h" #include "hamc_cpu_code.c" #include "LU_inverse_plain_kernels.cu" bin_matrix inverse_GF2_LU_gpu(bin_matrix A, bool verbose) { // B is output matrix bin_matrix B = mat_init_cpu(A->rows, A->cols); clock_t LU_start = clock(); /* allocate device memory */ HAMC_DATA_TYPE_t *deviceA; HAMC_DATA_TYPE_t *deviceB; int *hostIPIV = (int *)malloc(A->rows*sizeof(int)); int *deviceIPIV; hipMalloc((void **) &deviceA, A->rows * A->cols * sizeof(HAMC_DATA_TYPE_t)); hipMalloc((void **) &deviceB, A->rows * A->cols * sizeof(HAMC_DATA_TYPE_t)); hipMalloc((void **) &deviceIPIV, A->rows * sizeof(int)); /* transfer host data to device */ hipMemcpy(deviceA, A->data, A->rows * A->cols * sizeof(HAMC_DATA_TYPE_t), hipMemcpyHostToDevice); if (verbose) printf("Starting Inverse matrix kernel...\n"); // total number of threads should be at least A->cols int numThreadsPerBlock = 1024; int numGrids = A->cols/numThreadsPerBlock + 1; if (verbose) { printf("\t# threadBlocks: %s%d%s\n", YELLOW, numGrids, NC); printf("\t# threads per block: %s%d%s\n", YELLOW, numThreadsPerBlock, NC); printf("\tTotal threads: %s%d%s\n", YELLOW,numGrids*numThreadsPerBlock, NC); } dim3 dimGrid = dim3(numGrids, 1); dim3 dimThreads = dim3(numThreadsPerBlock); hipStream_t stream0; hipStream_t stream1; hipError_t cudaerr; hipStreamCreate(&stream0); hipStreamCreate(&stream1); // Streaming identity matrix generation to hide the latency // while we do LU decomposition hipLaunchKernelGGL(( make_GF2_identity_gpu), dim3(1),dim3(1),0,stream0, deviceB, A->rows); /******************** LU decomposition ************************************/ if (verbose) printf("Performing LU Decomposition...\n"); clock_t LU_decompose_start = clock(); // Unfortunately this has to be asynchronous. 
for (int i = 0; i < A->rows; i++) { hipLaunchKernelGGL(( GF2_LU_decompose_find_max_row), dim3(1), dim3(1), 0, stream1, deviceA, deviceIPIV, A->rows, i); hipLaunchKernelGGL(( GF2_LU_decompose_pivot_row), dim3(dimGrid), dim3(dimThreads), 0, stream1, deviceA, deviceIPIV, A->rows, i); hipLaunchKernelGGL(( GF2_LU_decompose_update_trailing_row) , dim3(dimGrid), dim3(dimThreads), 0, stream1, deviceA, A->rows, i); } hipStreamDestroy(stream0); hipStreamDestroy(stream1); clock_t LU_decompose_end = clock(); double LU_decompose_time = ((double) (LU_decompose_end - LU_decompose_start))/ CLOCKS_PER_SEC; cudaerr = hipDeviceSynchronize(); if (cudaerr != hipSuccess) printf("kernel launch failed with error \"%s\".\n", hipGetErrorString(cudaerr)); /******************** Forward Substitution ********************************/ clock_t LU_forward_start = clock(); if (verbose) printf("Performing Forward Substitution...\n"); hipLaunchKernelGGL(( GF2_Forward_substitute), dim3(dimGrid), dim3(dimThreads), 0, 0, deviceA, deviceB, A->rows); cudaerr = hipDeviceSynchronize(); if (cudaerr != hipSuccess) printf("kernel launch failed with error \"%s\".\n", hipGetErrorString(cudaerr)); clock_t LU_forward_end = clock(); double LU_forward_time = ((double) (LU_forward_end - LU_forward_start))/ CLOCKS_PER_SEC; /******************** Backward Substitution *******************************/ clock_t LU_backward_start = clock(); if (verbose) printf("Performing Backward Substitution...\n"); hipLaunchKernelGGL(( GF2_Backward_substitute), dim3(dimGrid), dim3(dimThreads), 0, 0, deviceA, deviceB, A->rows); cudaerr = hipDeviceSynchronize(); if (cudaerr != hipSuccess) printf("kernel launch failed with error \"%s\".\n", hipGetErrorString(cudaerr)); clock_t LU_backward_end = clock(); double LU_backward_time = ((double) (LU_backward_end - LU_backward_start))/ CLOCKS_PER_SEC; /******************** Final Swap ******************************************/ clock_t LU_final_swap_start = clock(); if (verbose) printf("Performing Final swap...\n"); hipLaunchKernelGGL(( GF2_swap_cols), dim3(dimGrid), dim3(dimThreads), 0, 0, deviceB, deviceIPIV, A->rows); cudaerr = hipDeviceSynchronize(); if (cudaerr != hipSuccess) printf("kernel launch failed with error \"%s\".\n", hipGetErrorString(cudaerr)); clock_t LU_final_swap_end = clock(); double LU_final_swap_time = ((double) (LU_final_swap_end - LU_final_swap_start))/ CLOCKS_PER_SEC; if (verbose) printf("Done!\n"); hipMemcpy(B->data, deviceB, A->rows * A->cols * sizeof(HAMC_DATA_TYPE_t), hipMemcpyDeviceToHost); clock_t LU_end = clock(); double LU_time = ((double) (LU_end - LU_start))/ CLOCKS_PER_SEC; if (verbose) { printf("Total time for LU inverse (GPU): %.7lf\n", LU_time); printf("\tLU decomposition: %.7lf - %.2lf%%\n", LU_decompose_time, 100*(LU_decompose_time/LU_time)); printf("\tForward Substitution: %.7lf - %.2lf%%\n", LU_forward_time, 100*(LU_forward_time/LU_time)); printf("\tBackward Substitution: %.7lf - %.2lf%%\n", LU_backward_time, 100*(LU_backward_time/LU_time)); printf("\tFinal Swap: %.7lf - %.2lf%%\n", LU_final_swap_time, 100*(LU_final_swap_time/LU_time)); } hipFree(deviceA); hipFree(deviceB); free(hostIPIV); return B; } #endif /* REFERENCE_GPU_CU */
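The device kernels used by the decomposition loop above (GF2_LU_decompose_find_max_row, GF2_LU_decompose_pivot_row, GF2_LU_decompose_update_trailing_row) live in LU_inverse_plain_kernels.cu and are not shown in this excerpt. For reference, over GF(2) addition and subtraction are both XOR, so the trailing-row update of Gaussian elimination collapses to a conditional row-XOR. A host-side sketch of that one step, assuming HAMC_DATA_TYPE_t is an integer type holding 0/1 bits; gf2_eliminate_row is a name introduced here:

// One GF(2) elimination step on a row-major n x n bit matrix M: clear column k of
// row i using pivot row k. Useful as a CPU sanity check against the GPU kernels.
static void gf2_eliminate_row(HAMC_DATA_TYPE_t *M, int n, int i, int k)
{
    if (M[i * n + k]) {                    // only rows with a 1 in the pivot column change
        for (int j = k; j < n; j++)
            M[i * n + j] ^= M[k * n + j];  // row_i = row_i XOR row_k (GF(2) subtraction)
    }
}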
c47cc24fcb3d04ee2cbce1f6501fb8a16d813aac.cu
#ifndef LU_INVERSE_PLAIN_CU #define LU_INVERSE_PLAIN_CU #include "hamc_common.h" #include "hamc_cpu_code.c" #include "LU_inverse_plain_kernels.cu" bin_matrix inverse_GF2_LU_gpu(bin_matrix A, bool verbose) { // B is output matrix bin_matrix B = mat_init_cpu(A->rows, A->cols); clock_t LU_start = clock(); /* allocate device memory */ HAMC_DATA_TYPE_t *deviceA; HAMC_DATA_TYPE_t *deviceB; int *hostIPIV = (int *)malloc(A->rows*sizeof(int)); int *deviceIPIV; cudaMalloc((void **) &deviceA, A->rows * A->cols * sizeof(HAMC_DATA_TYPE_t)); cudaMalloc((void **) &deviceB, A->rows * A->cols * sizeof(HAMC_DATA_TYPE_t)); cudaMalloc((void **) &deviceIPIV, A->rows * sizeof(int)); /* transfer host data to device */ cudaMemcpy(deviceA, A->data, A->rows * A->cols * sizeof(HAMC_DATA_TYPE_t), cudaMemcpyHostToDevice); if (verbose) printf("Starting Inverse matrix kernel...\n"); // total number of threads should be at least A->cols int numThreadsPerBlock = 1024; int numGrids = A->cols/numThreadsPerBlock + 1; if (verbose) { printf("\t# threadBlocks: %s%d%s\n", YELLOW, numGrids, NC); printf("\t# threads per block: %s%d%s\n", YELLOW, numThreadsPerBlock, NC); printf("\tTotal threads: %s%d%s\n", YELLOW,numGrids*numThreadsPerBlock, NC); } dim3 dimGrid = dim3(numGrids, 1); dim3 dimThreads = dim3(numThreadsPerBlock); cudaStream_t stream0; cudaStream_t stream1; cudaError_t cudaerr; cudaStreamCreate(&stream0); cudaStreamCreate(&stream1); // Streaming identity matrix generation to hide the latency // while we do LU decomposition make_GF2_identity_gpu<<<1,1,0,stream0>>>(deviceB, A->rows); /******************** LU decomposition ************************************/ if (verbose) printf("Performing LU Decomposition...\n"); clock_t LU_decompose_start = clock(); // Unfortunately this has to be asynchronous. 
for (int i = 0; i < A->rows; i++) { GF2_LU_decompose_find_max_row<<<1, 1, 0, stream1>>> (deviceA, deviceIPIV, A->rows, i); GF2_LU_decompose_pivot_row<<<dimGrid, dimThreads, 0, stream1>>> (deviceA, deviceIPIV, A->rows, i); GF2_LU_decompose_update_trailing_row <<<dimGrid, dimThreads, 0, stream1>>>(deviceA, A->rows, i); } cudaStreamDestroy(stream0); cudaStreamDestroy(stream1); clock_t LU_decompose_end = clock(); double LU_decompose_time = ((double) (LU_decompose_end - LU_decompose_start))/ CLOCKS_PER_SEC; cudaerr = cudaDeviceSynchronize(); if (cudaerr != cudaSuccess) printf("kernel launch failed with error \"%s\".\n", cudaGetErrorString(cudaerr)); /******************** Forward Substitution ********************************/ clock_t LU_forward_start = clock(); if (verbose) printf("Performing Forward Substitution...\n"); GF2_Forward_substitute<<<dimGrid, dimThreads>>> (deviceA, deviceB, A->rows); cudaerr = cudaDeviceSynchronize(); if (cudaerr != cudaSuccess) printf("kernel launch failed with error \"%s\".\n", cudaGetErrorString(cudaerr)); clock_t LU_forward_end = clock(); double LU_forward_time = ((double) (LU_forward_end - LU_forward_start))/ CLOCKS_PER_SEC; /******************** Backward Substitution *******************************/ clock_t LU_backward_start = clock(); if (verbose) printf("Performing Backward Substitution...\n"); GF2_Backward_substitute<<<dimGrid, dimThreads>>> (deviceA, deviceB, A->rows); cudaerr = cudaDeviceSynchronize(); if (cudaerr != cudaSuccess) printf("kernel launch failed with error \"%s\".\n", cudaGetErrorString(cudaerr)); clock_t LU_backward_end = clock(); double LU_backward_time = ((double) (LU_backward_end - LU_backward_start))/ CLOCKS_PER_SEC; /******************** Final Swap ******************************************/ clock_t LU_final_swap_start = clock(); if (verbose) printf("Performing Final swap...\n"); GF2_swap_cols<<<dimGrid, dimThreads>>>(deviceB, deviceIPIV, A->rows); cudaerr = cudaDeviceSynchronize(); if (cudaerr != cudaSuccess) printf("kernel launch failed with error \"%s\".\n", cudaGetErrorString(cudaerr)); clock_t LU_final_swap_end = clock(); double LU_final_swap_time = ((double) (LU_final_swap_end - LU_final_swap_start))/ CLOCKS_PER_SEC; if (verbose) printf("Done!\n"); cudaMemcpy(B->data, deviceB, A->rows * A->cols * sizeof(HAMC_DATA_TYPE_t), cudaMemcpyDeviceToHost); clock_t LU_end = clock(); double LU_time = ((double) (LU_end - LU_start))/ CLOCKS_PER_SEC; if (verbose) { printf("Total time for LU inverse (GPU): %.7lf\n", LU_time); printf("\tLU decomposition: %.7lf - %.2lf%%\n", LU_decompose_time, 100*(LU_decompose_time/LU_time)); printf("\tForward Substitution: %.7lf - %.2lf%%\n", LU_forward_time, 100*(LU_forward_time/LU_time)); printf("\tBackward Substitution: %.7lf - %.2lf%%\n", LU_backward_time, 100*(LU_backward_time/LU_time)); printf("\tFinal Swap: %.7lf - %.2lf%%\n", LU_final_swap_time, 100*(LU_final_swap_time/LU_time)); } cudaFree(deviceA); cudaFree(deviceB); free(hostIPIV); return B; } #endif /* REFERENCE_GPU_CU */
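A minimal usage sketch for inverse_GF2_LU_gpu() above. mat_init_cpu() and the bin_matrix fields (rows, cols, data) are taken from how the function itself uses them; invert_random_gf2_matrix is a hypothetical helper, the random fill is only illustrative (a real caller would have to reject singular matrices), and freeing A and the result uses whatever deallocator hamc_cpu_code.c provides.

#include <stdlib.h>

// Hypothetical caller: build a random n x n GF(2) matrix and invert it on the GPU.
bin_matrix invert_random_gf2_matrix(int n, bool verbose)
{
    bin_matrix A = mat_init_cpu(n, n);
    for (int i = 0; i < n * n; i++)
        A->data[i] = (HAMC_DATA_TYPE_t)(rand() & 1);   // random 0/1 entries

    bin_matrix A_inv = inverse_GF2_LU_gpu(A, verbose); // LU + substitutions on device
    return A_inv;                                      // caller still owns A as well
}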
b05d6e33ef19bd661f9c811c1cce26262d2c992b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { size_t c = threadIdx.x + blockIdx.x * blockDim.x; size_t r = threadIdx.y + blockIdx.y * blockDim.y; if(r < numRows && c < numCols) { uchar4 rgba = rgbaImage[r * numCols + c]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[r * numCols + c] = channelSum; } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { size_t block_size = 32; const dim3 blockSize(block_size, block_size, 1); const dim3 gridSize(ceil(double(numCols)/block_size), ceil(double(numRows) / block_size), 1); hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
b05d6e33ef19bd661f9c811c1cce26262d2c992b.cu
#include "utils.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { size_t c = threadIdx.x + blockIdx.x * blockDim.x; size_t r = threadIdx.y + blockIdx.y * blockDim.y; if(r <= numRows && c <= numCols) { uchar4 rgba = rgbaImage[r * numCols + c]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[r * numCols + c] = channelSum; } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { size_t block_size = 32; const dim3 blockSize(block_size, block_size, 1); const dim3 gridSize(ceil(double(numCols)/block_size), ceil(double(numRows) / block_size), 1); rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
a2e25cac83489c059973e211a03977e058eb6f55.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "cub/util_type.cuh" #include <hipcub/hipcub.hpp> #include <cub/device/device_segmented_radix_sort.cuh> #include "contrib_ops/cuda/transformers/generation_cuda_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { __global__ void InitKernel(float* beam_scores, int num_beams, int total_elements) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < total_elements) { int beam_index = index % num_beams; beam_scores[index] = beam_index > 0 ? static_cast<float>(-1e9) : 0.0f; } } void LaunchInitKernel( float* beam_scores, int batch_size, int num_beams, hipStream_t stream) { int total_elements = batch_size * num_beams; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; hipLaunchKernelGGL(( InitKernel), dim3(gridSize), dim3(blockSize), 0, stream, beam_scores, num_beams, total_elements); } __global__ void NextTokenKernel(const int64_t* next_token_indices, int32_t* next_indices, int32_t* next_tokens, int vocab_size, int total_elements) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < total_elements) { next_indices[index] = next_token_indices[index] / vocab_size; next_tokens[index] = next_token_indices[index] % vocab_size; } } void LaunchNextTokenKernel(const int64_t* next_token_indices, int32_t* next_indices, int32_t* next_tokens, int batch_size, int top_k, int vocab_size, hipStream_t stream) { int total_elements = batch_size * top_k; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; hipLaunchKernelGGL(( NextTokenKernel), dim3(gridSize), dim3(blockSize), 0, stream, next_token_indices, next_indices, next_tokens, vocab_size, total_elements); } template <typename T> __global__ void LogitsProcessKernel( T* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, const int* presence_mask, float presence_penalty, float temperature, int num_beams, int vocab_size, int padded_vocab_size, int total_elements, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < total_elements) { int batch_beam_index = index / padded_vocab_size; int word_id = index % padded_vocab_size; if (word_id >= vocab_size) { // Set any value within the padding region to the lowest value so that it isn't picked next_token_scores[index] = cub::FpLimits<T>::Lowest(); } else { // RepetitionPenaltyLogitsProcessor if (repetition_penalty != 1.0f) { int32_t* current_sequence = sequences + batch_beam_index * max_sequence_length; bool found = false; for (int i = 0; i < current_sequence_length; i++) { if (current_sequence[i] == word_id) { found = true; break; } } if (found) { float score = (float)next_token_scores[index]; next_token_scores[index] = (T)(score < 0 ? 
score * repetition_penalty : score / repetition_penalty); } } // NoRepeatNGramLogitsProcessor if (no_repeat_ngram_size > 0 && current_sequence_length >= no_repeat_ngram_size) { int32_t* current_sequence = sequences + batch_beam_index * max_sequence_length; bool found = false; for (int i = no_repeat_ngram_size - 1; i < current_sequence_length; i++) { if (current_sequence[i] == word_id) { // last token of n-gram matched found = true; for (int j = 0; j < no_repeat_ngram_size - 1; j++) { // match the remaining N-1 tokens if (current_sequence[i - j - 1] != current_sequence[current_sequence_length - 1 - j]) { found = false; break; } } if (found) { break; } } } if (found) { next_token_scores[index] = cub::FpLimits<T>::Lowest(); return; } } // VocabMaskLogitsProcessor if (vocab_mask != nullptr && vocab_mask[word_id] == 0) { next_token_scores[index] = cub::FpLimits<T>::Lowest(); return; } // PrefixVocabMaskLogitsProcessor int batch_id = batch_beam_index / num_beams; if (prefix_vocab_mask != nullptr && prefix_vocab_mask[batch_id * vocab_size + word_id] == 0) { next_token_scores[index] = cub::FpLimits<T>::Lowest(); return; } // MinLengthLogitsProcessor if (word_id == demote_token_id) { next_token_scores[index] = cub::FpLimits<T>::Lowest(); } // PresencePenaltyLogitsProcessor if (presence_mask != nullptr && presence_mask[index] == 1) { float score = (float)next_token_scores[index] - presence_penalty; next_token_scores[index] = (T)score; } // TemperatureLogitsProcessor if (temperature != 1.0f) { float score = (float)(next_token_scores[index]); next_token_scores[index] = (T)(score / temperature); } } } } template <typename T> void LaunchLogitsProcessKernel( T* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, int* presence_mask, float presence_penalty, float temperature, int batch_size, int num_beams, int vocab_size, int padded_vocab_size, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size, hipStream_t stream) { int total_elements = batch_size * num_beams * padded_vocab_size; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; hipLaunchKernelGGL(( LogitsProcessKernel<T>), dim3(gridSize), dim3(blockSize), 0, stream, next_token_scores, vocab_mask, prefix_vocab_mask, presence_mask, presence_penalty, temperature, num_beams, vocab_size, padded_vocab_size, total_elements, demote_token_id, sequences, max_sequence_length, current_sequence_length, repetition_penalty, no_repeat_ngram_size); } // Instantiation template void LaunchLogitsProcessKernel( float* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, int* presence_mask, float presence_penalty, float temperature, int batch_size, int num_beams, int vocab_size, int padded_vocab_size, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size, hipStream_t stream); template void LaunchLogitsProcessKernel( half* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, int* presence_mask, float presence_penalty, float temperature, int batch_size, int num_beams, int vocab_size, int padded_vocab_size, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size, hipStream_t stream); __global__ void AddProbsKernel(float* log_probs, float* cum_log_probs, const int vocab_size, const int total_elements) { 
int index = blockIdx.x * blockDim.x + threadIdx.x; int batch_beam_index = index / vocab_size; if (index < total_elements) log_probs[index] += cum_log_probs[batch_beam_index]; } template <typename T> void LaunchAddProbsKernel(T* log_probs, T* cum_log_probs, const int batch_size, const int num_beams, const int vocab_size, hipStream_t stream) { int total_elements = batch_size * num_beams * vocab_size; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; hipLaunchKernelGGL(( AddProbsKernel), dim3(gridSize), dim3(blockSize), 0, stream, log_probs, cum_log_probs, vocab_size, total_elements); } template void LaunchAddProbsKernel( float* log_probs, float* cum_log_probs, const int batch_size, const int num_beams, const int vocab_size, hipStream_t stream); template <typename T> __global__ void UpdateGptInputsKernel(const T* old_mask_data, T* mask_data, int32_t* next_positions, int batch_beam_size, int current_length) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < batch_beam_size * current_length) { // Update attention mask. int i = index / current_length; int j = index % current_length; mask_data[index] = (j < current_length - 1) ? old_mask_data[i * (current_length - 1) + j] : static_cast<T>(1); if (next_positions != nullptr) { // Update sequence length (or next positions). if (index < batch_beam_size) { next_positions[index]++; } } } } void LaunchUpdateGptKernel(const int32_t* old_mask_data, int32_t* mask_data, int32_t* next_positions, int batch_beam_size, int current_length, hipStream_t stream) { assert(current_length > 0); int total_elements = batch_beam_size * current_length; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; hipLaunchKernelGGL(( UpdateGptInputsKernel<int32_t>), dim3(gridSize), dim3(blockSize), 0, stream, old_mask_data, mask_data, next_positions, batch_beam_size, current_length); } template <typename T> void GetTempStorageSize(const T* d_keys_in, const int* d_values_in, int* d_offsets, int num_items, int num_segments, hipStream_t stream, bool is_descending, size_t& temp_storage_bytes) { if (is_descending) { CUDA_CALL_THROW(hipcub::DeviceSegmentedRadixSort::SortPairsDescending(nullptr, temp_storage_bytes, d_keys_in, (T*)nullptr, d_values_in, (int*)nullptr, num_items, num_segments, d_offsets, d_offsets + 1, 0, sizeof(T) * 8, stream)); } else { CUDA_CALL_THROW(hipcub::DeviceSegmentedRadixSort::SortPairs(nullptr, temp_storage_bytes, d_keys_in, (T*)nullptr, d_values_in, (int*)nullptr, num_items, num_segments, d_offsets, d_offsets + 1, 0, sizeof(T) * 8, stream)); } } template void GetTempStorageSize( const float* d_keys_in, const int* d_values_in, int* d_offsets, int num_items, int num_segments, hipStream_t stream, bool is_descending, size_t& temp_storage_bytes); template void GetTempStorageSize( const half* d_keys_in, const int* d_values_in, int* d_offsets, int num_items, int num_segments, hipStream_t stream, bool is_descending, size_t& temp_storage_bytes); // TODO: merge to one kernel __global__ void SetupParamsKernel(int* d_values_in, int* d_offsets, int batch_size, int vocab_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; int total_elements = batch_size * vocab_size; if (index < total_elements) { d_values_in[index] = index % vocab_size; } if (index < batch_size + 1) { d_offsets[index] = index * vocab_size; } } void LaunchSetupParamsKernel(int* d_values_in, int* d_offsets, int batch_size, int vocab_size, hipStream_t stream) { int total_elements = batch_size * 
vocab_size; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; hipLaunchKernelGGL(( SetupParamsKernel), dim3(gridSize), dim3(blockSize), 0, stream, d_values_in, d_offsets, batch_size, vocab_size); } template <typename T> void LaunchSortPairs(void* d_temp_storage, size_t temp_storage_bytes, const T* d_keys_in, T* d_keys_out, const int* d_values_in, int* d_values_out, int num_items, int num_segments, int* d_offsets, hipStream_t stream, bool is_descending) { if (is_descending) { CUDA_CALL_THROW(hipcub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, num_items, num_segments, d_offsets, d_offsets + 1, 0, sizeof(T) * 8, stream)); } else { CUDA_CALL_THROW(hipcub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, num_items, num_segments, d_offsets, d_offsets + 1, 0, sizeof(T) * 8, stream)); } } template void LaunchSortPairs(void* d_temp_storage, size_t temp_storage_bytes, const float* d_keys_in, float* d_keys_out, const int* d_values_in, int* d_values_out, int num_items, int num_segments, int* d_offsets, hipStream_t stream, bool is_descending); template void LaunchSortPairs(void* d_temp_storage, size_t temp_storage_bytes, const half* d_keys_in, half* d_keys_out, const int* d_values_in, int* d_values_out, int num_items, int num_segments, int* d_offsets, hipStream_t stream, bool is_descending); // A stateful callback functor that maintains a running prefix to be applied // during consecutive scan operations. struct BlockPrefixCallbackOp { float running_total; // running prefix __device__ BlockPrefixCallbackOp(float running_total) : running_total(running_total) {} // Callback operator to be entered by the first warp of threads in the block. // Thread-0 is responsible for returning a value for seeding the block-wide scan. 
__device__ float operator()(float block_aggregate) { float old_prefix = running_total; running_total += block_aggregate; return old_prefix; } }; template <typename T, int kBlockSize> __global__ void FilterLogitsKernelCustom(float* d_sorted_logits_in, const int* d_sorted_indices, T* d_logits_in_out, float top_p_threshold, float filter_value, int batch_size, int vocab_size) { int vocab_idx = threadIdx.x; int batch_id = blockIdx.x; int offset = batch_id * vocab_size; typedef hipcub::BlockScan<float, kBlockSize> BlockScan; __shared__ typename BlockScan::TempStorage temp_storage; BlockPrefixCallbackOp prefix_op(0); for (int idx = vocab_idx; idx < vocab_size; idx += kBlockSize) { float sum = d_sorted_logits_in[offset + idx]; BlockScan(temp_storage).ExclusiveSum(sum, sum, prefix_op); __syncthreads(); if (sum >= top_p_threshold) { int original_index = offset + d_sorted_indices[offset + idx]; d_logits_in_out[original_index] = (T)filter_value; } } } template <typename T, int kBlockSize> __global__ void FilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, T* d_logits_in_out, float top_p_threshold, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size) { int vocab_idx = threadIdx.x; int batch_id = blockIdx.x; int offset = batch_id * vocab_size; typedef hipcub::BlockScan<float, kBlockSize> BlockScan; __shared__ typename BlockScan::TempStorage temp_storage; BlockPrefixCallbackOp prefix_op(0); for (int idx = vocab_idx; idx < vocab_size; idx += kBlockSize) { float sum = d_sorted_logits_in[offset + idx]; BlockScan(temp_storage).InclusiveSum(sum, sum, prefix_op); __syncthreads(); if (sum <= top_p_threshold) { if (idx + min_tokens_to_keep < vocab_size) { int original_index = offset + d_sorted_indices[offset + idx]; d_logits_in_out[original_index] = (T)filter_value; } } } } template <typename T> void LaunchFilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, T* d_logits_in_out, float top_p, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size, hipStream_t stream, bool is_descending) { constexpr int kBlockSize = 256; if (is_descending) { hipLaunchKernelGGL(( FilterLogitsKernelCustom<T, kBlockSize>), dim3(batch_size), dim3(kBlockSize), 0, stream, d_sorted_logits_in, d_sorted_indices, d_logits_in_out, top_p, filter_value, batch_size, vocab_size); } else { hipLaunchKernelGGL(( FilterLogitsKernel<T, kBlockSize>), dim3(batch_size), dim3(kBlockSize), 0, stream, d_sorted_logits_in, d_sorted_indices, d_logits_in_out, 1 - top_p, filter_value, min_tokens_to_keep, batch_size, vocab_size); } } template void LaunchFilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, float* d_logits_in_out, float top_p, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size, hipStream_t stream, bool is_descending); template void LaunchFilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, half* d_logits_in_out, float top_p, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size, hipStream_t stream, bool is_descending); // Ref: https://github.com/pytorch/pytorch/blob/release/1.13/aten/src/ATen/native/cuda/MultinomialKernel.cu template <typename scalar_t, typename accscalar_t> __global__ void sampleMultinomialOnce(int32_t* dest, int distributions, int categories, scalar_t* sampled, scalar_t* dist, int stride_dist, // dist->stride(0) int stride_categories, // dist->stride(1) int* d_presence_mask) { extern __shared__ unsigned char my_smem[]; __shared__ bool found; __shared__ 
unsigned foundPos; accscalar_t* smem = reinterpret_cast<accscalar_t*>(my_smem); accscalar_t accZero = static_cast<accscalar_t>(0); scalar_t zero = static_cast<scalar_t>(0); for (int curDist = blockIdx.x; curDist < distributions; curDist += gridDim.x) { // Assume sum = 1 in Top P sampling as the input is softmaxed. accscalar_t sum = 1; // Broadcast sum and sample value if (threadIdx.x == 0) { // Make sure the sum of our distribution didn't overflow // CUDA_KERNEL_ASSERT(!_isinf(val)); // CUDA_KERNEL_ASSERT(sum > accZero); foundPos = 0; smem[0] = sum; smem[1] = sampled[curDist]; } __syncthreads(); sum = smem[0]; scalar_t sample = static_cast<scalar_t>(smem[1]); __syncthreads(); if (sum == accZero) { // Choose the first element if (threadIdx.x == 0) { dest[curDist] = 0; } continue; } int chunks = (categories + (int)blockDim.x - 1) / blockDim.x; accscalar_t prevHighProb = accZero; found = false; for (int chunk = 0; chunk < chunks && !found; ++chunk) { // All threads in bounds load a value int cat = chunk * blockDim.x + threadIdx.x; accscalar_t dist_val = cat < categories ? static_cast<accscalar_t>(dist[curDist * stride_dist + cat * stride_categories]) / sum : accZero; smem[threadIdx.x] = dist_val; __syncthreads(); // Perform an inclusive prefix sum of the shared memory contents for (int offset = 1; offset < blockDim.x; offset *= 2) { accscalar_t val = accZero; if (threadIdx.x >= offset) { val = smem[threadIdx.x - offset] + smem[threadIdx.x]; } __syncthreads(); if (threadIdx.x >= offset) { smem[threadIdx.x] = val; } __syncthreads(); } // Each thread will check to see if the sample falls in its bucket scalar_t curBucket = static_cast<scalar_t>(smem[threadIdx.x] + prevHighProb); scalar_t prevBucket = static_cast<scalar_t>( threadIdx.x == 0 ? prevHighProb : smem[threadIdx.x - 1] + prevHighProb); bool inBucket = (cat < categories) && (!(sample >= curBucket) && (sample >= prevBucket) && (dist_val > zero)); if (inBucket) { // We're done; we have the sample // Torch indices are 1-based atomicMax(&foundPos, cat); found = true; } // Store the previous scan's high value for future use prevHighProb = prevHighProb + smem[blockDim.x - 1]; __syncthreads(); } if (threadIdx.x == 0) { if (found) { dest[curDist] = foundPos; } else { // This should address a rare bug where we don't select a valid index. This likely occurs when // due to floating point arithmetic rounding errors, our cumulative sum does not add up to 1, but // and our uniform sample is greater than this value. In this case we likely have unitialized memory // in dest[curDist]. So basically we will loop through the distribution and pick the largest index // where the distribution is non-zero. This is obviously terribly inefficient, but due to the // rarity in which this occurs, this should not be an issue. 
for (int cat = categories - 1; cat >= 0; --cat) { if (dist[curDist * stride_dist + cat * stride_categories] > zero) { dest[curDist] = cat; break; } } } } } // update presence mask int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= distributions * categories) { return; } int dist_idx = index / categories; int cat_idx = index % categories; if (dest[dist_idx] == cat_idx) { d_presence_mask[index] = 1; } } // Only support n_sample = 1 void TorchMultinomialKernelLauncher(float* d_input, float* d_sampled, int32_t* d_output, int batch_size, int vocab_size, int* d_presence_mask, hipStream_t stream) { // Store the props in class variables int device; CUDA_CALL_THROW(hipGetDevice(&device)); hipDeviceProp_t props; CUDA_CALL_THROW(hipGetDeviceProperties(&props, device)); int numSM = props.multiProcessorCount; int maxThreads = props.maxThreadsPerBlock; int warp_size = 32; // at::cuda::warp_size(); int requiredWarps = (vocab_size + warp_size - 1) / warp_size; int requiredThreads = ::min(maxThreads, requiredWarps * warp_size); int requiredShared = requiredThreads * sizeof(float); dim3 block(requiredThreads); dim3 grid(::min(batch_size, numSM * 4)); hipLaunchKernelGGL(( sampleMultinomialOnce<float, float>) , dim3(grid), dim3(block), requiredShared, stream, d_output, batch_size, vocab_size, d_sampled, d_input, vocab_size, 1, d_presence_mask); } __global__ void UpdateDecoderMaskedMultiHeadAttentionCacheIndirectionKernel(int32_t* tgt_indir_cache, const int32_t* src_indir_cache, const int32_t* beam_ids, int batch_size, int beam_width, int input_seq_length, int max_seq_length, int current_length) { int time_step = threadIdx.x + blockIdx.x * blockDim.x; int bb_id = threadIdx.y + blockIdx.y * blockDim.y; const int batch_id = bb_id / beam_width; const int beam_id = bb_id % beam_width; if (bb_id >= beam_width * batch_size || time_step >= current_length) { return; } const int src_beam = beam_ids[batch_id * beam_width + beam_id] % beam_width; const int tgt_offset = batch_id * beam_width * max_seq_length + beam_id * max_seq_length + time_step; if (time_step < input_seq_length) { // For time steps that correspond to the input sequence, // the beam that it comes from is always 0. tgt_indir_cache[tgt_offset] = static_cast<int32_t>(0); } else if (time_step == (current_length - 1)) { // For the final (newly generated) time step, // the beam that it comes from is always the beam that we // are currently processing (i.e.) from this point on, these time-steps // form the new beams. tgt_indir_cache[tgt_offset] = static_cast<int32_t>(beam_id); } else { // For all other time-steps, we look up the source indirection, to // see which beam it came from based on the `src_beam`. const int src_offset = batch_id * beam_width * max_seq_length + src_beam * max_seq_length + time_step; tgt_indir_cache[tgt_offset] = src_indir_cache[src_offset]; } } void UpdateDecoderMaskedMultiHeadAttentionCacheIndirection(int32_t* tgt_indir_cache, const int32_t* src_indir_cache, const int32_t* beam_ids, int batch_size, int beam_width, int input_seq_length, int max_seq_length, int current_length, hipStream_t stream) { const dim3 block(32); const dim3 grid((current_length + block.x - 1) / block.x, batch_size * beam_width); hipLaunchKernelGGL(( UpdateDecoderMaskedMultiHeadAttentionCacheIndirectionKernel), dim3(grid), dim3(block), 0, stream, tgt_indir_cache, src_indir_cache, beam_ids, batch_size, beam_width, input_seq_length, max_seq_length, current_length); } } // namespace cuda } // namespace contrib } // namespace onnxruntime
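The segmented-sort and top-p helpers in the file above (LaunchSetupParamsKernel, GetTempStorageSize, LaunchSortPairs, LaunchFilterLogitsKernel) are meant to be chained once per sampling step. The sketch below shows that chaining, written against the CUDA (.cu) half of this file pair that follows, for T = float on the ascending (is_descending = false) path; TopPFilterSketch is a hypothetical wrapper (it would sit inside, or qualify, namespace onnxruntime::contrib::cuda), the filter value of -10000.0f is illustrative, and the softmax over the sorted logits (step 4) is assumed to happen elsewhere, as the real onnxruntime sampling op does between sort and filter.

// Chain the helpers above for one batch of logits shaped [batch_size, vocab_size].
void TopPFilterSketch(float* d_logits,        // filtered in place
                      float* d_sorted_keys,   // scratch: sorted logits
                      float* d_sorted_probs,  // sorted + softmaxed copy (filled in step 4)
                      int*   d_sorted_indices,
                      int*   d_values_in,
                      int*   d_offsets,       // batch_size + 1 entries
                      int batch_size, int vocab_size,
                      float top_p, cudaStream_t stream)
{
    const bool is_descending = false;
    const int num_items = batch_size * vocab_size;

    // 1. Per-segment value indices (0..vocab_size-1) and segment offsets.
    LaunchSetupParamsKernel(d_values_in, d_offsets, batch_size, vocab_size, stream);

    // 2. Query CUB for the temporary storage the segmented radix sort needs.
    size_t temp_storage_bytes = 0;
    GetTempStorageSize<float>(d_logits, d_values_in, d_offsets, num_items,
                              batch_size, stream, is_descending, temp_storage_bytes);
    void* d_temp_storage = nullptr;
    cudaMalloc(&d_temp_storage, temp_storage_bytes);

    // 3. Sort each row's logits, carrying the vocab ids along as values.
    LaunchSortPairs<float>(d_temp_storage, temp_storage_bytes, d_logits, d_sorted_keys,
                           d_values_in, d_sorted_indices, num_items, batch_size,
                           d_offsets, stream, is_descending);

    // 4. (Assumed elsewhere) softmax d_sorted_keys into d_sorted_probs so the
    //    block-scan in FilterLogitsKernel accumulates probabilities.

    // 5. Mask tokens outside the top-p nucleus back in the original logits layout.
    LaunchFilterLogitsKernel<float>(d_sorted_probs, d_sorted_indices, d_logits,
                                    top_p, /*filter_value*/ -10000.0f,
                                    /*min_tokens_to_keep*/ 1,
                                    batch_size, vocab_size, stream, is_descending);

    cudaFree(d_temp_storage);
}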
a2e25cac83489c059973e211a03977e058eb6f55.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "cub/util_type.cuh" #include <cub/cub.cuh> #include <cub/device/device_segmented_radix_sort.cuh> #include "contrib_ops/cuda/transformers/generation_cuda_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { __global__ void InitKernel(float* beam_scores, int num_beams, int total_elements) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < total_elements) { int beam_index = index % num_beams; beam_scores[index] = beam_index > 0 ? static_cast<float>(-1e9) : 0.0f; } } void LaunchInitKernel( float* beam_scores, int batch_size, int num_beams, cudaStream_t stream) { int total_elements = batch_size * num_beams; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; InitKernel<<<gridSize, blockSize, 0, stream>>>(beam_scores, num_beams, total_elements); } __global__ void NextTokenKernel(const int64_t* next_token_indices, int32_t* next_indices, int32_t* next_tokens, int vocab_size, int total_elements) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < total_elements) { next_indices[index] = next_token_indices[index] / vocab_size; next_tokens[index] = next_token_indices[index] % vocab_size; } } void LaunchNextTokenKernel(const int64_t* next_token_indices, int32_t* next_indices, int32_t* next_tokens, int batch_size, int top_k, int vocab_size, cudaStream_t stream) { int total_elements = batch_size * top_k; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; NextTokenKernel<<<gridSize, blockSize, 0, stream>>>(next_token_indices, next_indices, next_tokens, vocab_size, total_elements); } template <typename T> __global__ void LogitsProcessKernel( T* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, const int* presence_mask, float presence_penalty, float temperature, int num_beams, int vocab_size, int padded_vocab_size, int total_elements, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < total_elements) { int batch_beam_index = index / padded_vocab_size; int word_id = index % padded_vocab_size; if (word_id >= vocab_size) { // Set any value within the padding region to the lowest value so that it isn't picked next_token_scores[index] = cub::FpLimits<T>::Lowest(); } else { // RepetitionPenaltyLogitsProcessor if (repetition_penalty != 1.0f) { int32_t* current_sequence = sequences + batch_beam_index * max_sequence_length; bool found = false; for (int i = 0; i < current_sequence_length; i++) { if (current_sequence[i] == word_id) { found = true; break; } } if (found) { float score = (float)next_token_scores[index]; next_token_scores[index] = (T)(score < 0 ? 
score * repetition_penalty : score / repetition_penalty); } } // NoRepeatNGramLogitsProcessor if (no_repeat_ngram_size > 0 && current_sequence_length >= no_repeat_ngram_size) { int32_t* current_sequence = sequences + batch_beam_index * max_sequence_length; bool found = false; for (int i = no_repeat_ngram_size - 1; i < current_sequence_length; i++) { if (current_sequence[i] == word_id) { // last token of n-gram matched found = true; for (int j = 0; j < no_repeat_ngram_size - 1; j++) { // match the remaining N-1 tokens if (current_sequence[i - j - 1] != current_sequence[current_sequence_length - 1 - j]) { found = false; break; } } if (found) { break; } } } if (found) { next_token_scores[index] = cub::FpLimits<T>::Lowest(); return; } } // VocabMaskLogitsProcessor if (vocab_mask != nullptr && vocab_mask[word_id] == 0) { next_token_scores[index] = cub::FpLimits<T>::Lowest(); return; } // PrefixVocabMaskLogitsProcessor int batch_id = batch_beam_index / num_beams; if (prefix_vocab_mask != nullptr && prefix_vocab_mask[batch_id * vocab_size + word_id] == 0) { next_token_scores[index] = cub::FpLimits<T>::Lowest(); return; } // MinLengthLogitsProcessor if (word_id == demote_token_id) { next_token_scores[index] = cub::FpLimits<T>::Lowest(); } // PresencePenaltyLogitsProcessor if (presence_mask != nullptr && presence_mask[index] == 1) { float score = (float)next_token_scores[index] - presence_penalty; next_token_scores[index] = (T)score; } // TemperatureLogitsProcessor if (temperature != 1.0f) { float score = (float)(next_token_scores[index]); next_token_scores[index] = (T)(score / temperature); } } } } template <typename T> void LaunchLogitsProcessKernel( T* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, int* presence_mask, float presence_penalty, float temperature, int batch_size, int num_beams, int vocab_size, int padded_vocab_size, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size, cudaStream_t stream) { int total_elements = batch_size * num_beams * padded_vocab_size; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; LogitsProcessKernel<T><<<gridSize, blockSize, 0, stream>>>( next_token_scores, vocab_mask, prefix_vocab_mask, presence_mask, presence_penalty, temperature, num_beams, vocab_size, padded_vocab_size, total_elements, demote_token_id, sequences, max_sequence_length, current_sequence_length, repetition_penalty, no_repeat_ngram_size); } // Instantiation template void LaunchLogitsProcessKernel( float* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, int* presence_mask, float presence_penalty, float temperature, int batch_size, int num_beams, int vocab_size, int padded_vocab_size, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size, cudaStream_t stream); template void LaunchLogitsProcessKernel( half* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, int* presence_mask, float presence_penalty, float temperature, int batch_size, int num_beams, int vocab_size, int padded_vocab_size, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size, cudaStream_t stream); __global__ void AddProbsKernel(float* log_probs, float* cum_log_probs, const int vocab_size, const int total_elements) { int index = blockIdx.x * 
blockDim.x + threadIdx.x; int batch_beam_index = index / vocab_size; if (index < total_elements) log_probs[index] += cum_log_probs[batch_beam_index]; } template <typename T> void LaunchAddProbsKernel(T* log_probs, T* cum_log_probs, const int batch_size, const int num_beams, const int vocab_size, cudaStream_t stream) { int total_elements = batch_size * num_beams * vocab_size; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; AddProbsKernel<<<gridSize, blockSize, 0, stream>>>(log_probs, cum_log_probs, vocab_size, total_elements); } template void LaunchAddProbsKernel( float* log_probs, float* cum_log_probs, const int batch_size, const int num_beams, const int vocab_size, cudaStream_t stream); template <typename T> __global__ void UpdateGptInputsKernel(const T* old_mask_data, T* mask_data, int32_t* next_positions, int batch_beam_size, int current_length) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < batch_beam_size * current_length) { // Update attention mask. int i = index / current_length; int j = index % current_length; mask_data[index] = (j < current_length - 1) ? old_mask_data[i * (current_length - 1) + j] : static_cast<T>(1); if (next_positions != nullptr) { // Update sequence length (or next positions). if (index < batch_beam_size) { next_positions[index]++; } } } } void LaunchUpdateGptKernel(const int32_t* old_mask_data, int32_t* mask_data, int32_t* next_positions, int batch_beam_size, int current_length, cudaStream_t stream) { assert(current_length > 0); int total_elements = batch_beam_size * current_length; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; UpdateGptInputsKernel<int32_t><<<gridSize, blockSize, 0, stream>>>( old_mask_data, mask_data, next_positions, batch_beam_size, current_length); } template <typename T> void GetTempStorageSize(const T* d_keys_in, const int* d_values_in, int* d_offsets, int num_items, int num_segments, cudaStream_t stream, bool is_descending, size_t& temp_storage_bytes) { if (is_descending) { CUDA_CALL_THROW(cub::DeviceSegmentedRadixSort::SortPairsDescending(nullptr, temp_storage_bytes, d_keys_in, (T*)nullptr, d_values_in, (int*)nullptr, num_items, num_segments, d_offsets, d_offsets + 1, 0, sizeof(T) * 8, stream)); } else { CUDA_CALL_THROW(cub::DeviceSegmentedRadixSort::SortPairs(nullptr, temp_storage_bytes, d_keys_in, (T*)nullptr, d_values_in, (int*)nullptr, num_items, num_segments, d_offsets, d_offsets + 1, 0, sizeof(T) * 8, stream)); } } template void GetTempStorageSize( const float* d_keys_in, const int* d_values_in, int* d_offsets, int num_items, int num_segments, cudaStream_t stream, bool is_descending, size_t& temp_storage_bytes); template void GetTempStorageSize( const half* d_keys_in, const int* d_values_in, int* d_offsets, int num_items, int num_segments, cudaStream_t stream, bool is_descending, size_t& temp_storage_bytes); // TODO: merge to one kernel __global__ void SetupParamsKernel(int* d_values_in, int* d_offsets, int batch_size, int vocab_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; int total_elements = batch_size * vocab_size; if (index < total_elements) { d_values_in[index] = index % vocab_size; } if (index < batch_size + 1) { d_offsets[index] = index * vocab_size; } } void LaunchSetupParamsKernel(int* d_values_in, int* d_offsets, int batch_size, int vocab_size, cudaStream_t stream) { int total_elements = batch_size * vocab_size; constexpr int blockSize = 256; const int gridSize = (total_elements + 
blockSize - 1) / blockSize; SetupParamsKernel<<<gridSize, blockSize, 0, stream>>>(d_values_in, d_offsets, batch_size, vocab_size); } template <typename T> void LaunchSortPairs(void* d_temp_storage, size_t temp_storage_bytes, const T* d_keys_in, T* d_keys_out, const int* d_values_in, int* d_values_out, int num_items, int num_segments, int* d_offsets, cudaStream_t stream, bool is_descending) { if (is_descending) { CUDA_CALL_THROW(cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, num_items, num_segments, d_offsets, d_offsets + 1, 0, sizeof(T) * 8, stream)); } else { CUDA_CALL_THROW(cub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, num_items, num_segments, d_offsets, d_offsets + 1, 0, sizeof(T) * 8, stream)); } } template void LaunchSortPairs(void* d_temp_storage, size_t temp_storage_bytes, const float* d_keys_in, float* d_keys_out, const int* d_values_in, int* d_values_out, int num_items, int num_segments, int* d_offsets, cudaStream_t stream, bool is_descending); template void LaunchSortPairs(void* d_temp_storage, size_t temp_storage_bytes, const half* d_keys_in, half* d_keys_out, const int* d_values_in, int* d_values_out, int num_items, int num_segments, int* d_offsets, cudaStream_t stream, bool is_descending); // A stateful callback functor that maintains a running prefix to be applied // during consecutive scan operations. struct BlockPrefixCallbackOp { float running_total; // running prefix __device__ BlockPrefixCallbackOp(float running_total) : running_total(running_total) {} // Callback operator to be entered by the first warp of threads in the block. // Thread-0 is responsible for returning a value for seeding the block-wide scan. 
__device__ float operator()(float block_aggregate) { float old_prefix = running_total; running_total += block_aggregate; return old_prefix; } }; template <typename T, int kBlockSize> __global__ void FilterLogitsKernelCustom(float* d_sorted_logits_in, const int* d_sorted_indices, T* d_logits_in_out, float top_p_threshold, float filter_value, int batch_size, int vocab_size) { int vocab_idx = threadIdx.x; int batch_id = blockIdx.x; int offset = batch_id * vocab_size; typedef cub::BlockScan<float, kBlockSize> BlockScan; __shared__ typename BlockScan::TempStorage temp_storage; BlockPrefixCallbackOp prefix_op(0); for (int idx = vocab_idx; idx < vocab_size; idx += kBlockSize) { float sum = d_sorted_logits_in[offset + idx]; BlockScan(temp_storage).ExclusiveSum(sum, sum, prefix_op); __syncthreads(); if (sum >= top_p_threshold) { int original_index = offset + d_sorted_indices[offset + idx]; d_logits_in_out[original_index] = (T)filter_value; } } } template <typename T, int kBlockSize> __global__ void FilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, T* d_logits_in_out, float top_p_threshold, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size) { int vocab_idx = threadIdx.x; int batch_id = blockIdx.x; int offset = batch_id * vocab_size; typedef cub::BlockScan<float, kBlockSize> BlockScan; __shared__ typename BlockScan::TempStorage temp_storage; BlockPrefixCallbackOp prefix_op(0); for (int idx = vocab_idx; idx < vocab_size; idx += kBlockSize) { float sum = d_sorted_logits_in[offset + idx]; BlockScan(temp_storage).InclusiveSum(sum, sum, prefix_op); __syncthreads(); if (sum <= top_p_threshold) { if (idx + min_tokens_to_keep < vocab_size) { int original_index = offset + d_sorted_indices[offset + idx]; d_logits_in_out[original_index] = (T)filter_value; } } } } template <typename T> void LaunchFilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, T* d_logits_in_out, float top_p, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size, cudaStream_t stream, bool is_descending) { constexpr int kBlockSize = 256; if (is_descending) { FilterLogitsKernelCustom<T, kBlockSize><<<batch_size, kBlockSize, 0, stream>>>(d_sorted_logits_in, d_sorted_indices, d_logits_in_out, top_p, filter_value, batch_size, vocab_size); } else { FilterLogitsKernel<T, kBlockSize><<<batch_size, kBlockSize, 0, stream>>>(d_sorted_logits_in, d_sorted_indices, d_logits_in_out, 1 - top_p, filter_value, min_tokens_to_keep, batch_size, vocab_size); } } template void LaunchFilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, float* d_logits_in_out, float top_p, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size, cudaStream_t stream, bool is_descending); template void LaunchFilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, half* d_logits_in_out, float top_p, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size, cudaStream_t stream, bool is_descending); // Ref: https://github.com/pytorch/pytorch/blob/release/1.13/aten/src/ATen/native/cuda/MultinomialKernel.cu template <typename scalar_t, typename accscalar_t> __global__ void sampleMultinomialOnce(int32_t* dest, int distributions, int categories, scalar_t* sampled, scalar_t* dist, int stride_dist, // dist->stride(0) int stride_categories, // dist->stride(1) int* d_presence_mask) { extern __shared__ unsigned char my_smem[]; __shared__ bool found; __shared__ unsigned foundPos; accscalar_t* smem = 
reinterpret_cast<accscalar_t*>(my_smem); accscalar_t accZero = static_cast<accscalar_t>(0); scalar_t zero = static_cast<scalar_t>(0); for (int curDist = blockIdx.x; curDist < distributions; curDist += gridDim.x) { // Assume sum = 1 in Top P sampling as the input is softmaxed. accscalar_t sum = 1; // Broadcast sum and sample value if (threadIdx.x == 0) { // Make sure the sum of our distribution didn't overflow // CUDA_KERNEL_ASSERT(!_isinf(val)); // CUDA_KERNEL_ASSERT(sum > accZero); foundPos = 0; smem[0] = sum; smem[1] = sampled[curDist]; } __syncthreads(); sum = smem[0]; scalar_t sample = static_cast<scalar_t>(smem[1]); __syncthreads(); if (sum == accZero) { // Choose the first element if (threadIdx.x == 0) { dest[curDist] = 0; } continue; } int chunks = (categories + (int)blockDim.x - 1) / blockDim.x; accscalar_t prevHighProb = accZero; found = false; for (int chunk = 0; chunk < chunks && !found; ++chunk) { // All threads in bounds load a value int cat = chunk * blockDim.x + threadIdx.x; accscalar_t dist_val = cat < categories ? static_cast<accscalar_t>(dist[curDist * stride_dist + cat * stride_categories]) / sum : accZero; smem[threadIdx.x] = dist_val; __syncthreads(); // Perform an inclusive prefix sum of the shared memory contents for (int offset = 1; offset < blockDim.x; offset *= 2) { accscalar_t val = accZero; if (threadIdx.x >= offset) { val = smem[threadIdx.x - offset] + smem[threadIdx.x]; } __syncthreads(); if (threadIdx.x >= offset) { smem[threadIdx.x] = val; } __syncthreads(); } // Each thread will check to see if the sample falls in its bucket scalar_t curBucket = static_cast<scalar_t>(smem[threadIdx.x] + prevHighProb); scalar_t prevBucket = static_cast<scalar_t>( threadIdx.x == 0 ? prevHighProb : smem[threadIdx.x - 1] + prevHighProb); bool inBucket = (cat < categories) && (!(sample >= curBucket) && (sample >= prevBucket) && (dist_val > zero)); if (inBucket) { // We're done; we have the sample // Torch indices are 1-based atomicMax(&foundPos, cat); found = true; } // Store the previous scan's high value for future use prevHighProb = prevHighProb + smem[blockDim.x - 1]; __syncthreads(); } if (threadIdx.x == 0) { if (found) { dest[curDist] = foundPos; } else { // This should address a rare bug where we don't select a valid index. This likely occurs when // due to floating point arithmetic rounding errors, our cumulative sum does not add up to 1, but // and our uniform sample is greater than this value. In this case we likely have unitialized memory // in dest[curDist]. So basically we will loop through the distribution and pick the largest index // where the distribution is non-zero. This is obviously terribly inefficient, but due to the // rarity in which this occurs, this should not be an issue. 
for (int cat = categories - 1; cat >= 0; --cat) { if (dist[curDist * stride_dist + cat * stride_categories] > zero) { dest[curDist] = cat; break; } } } } } // update presence mask int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= distributions * categories) { return; } int dist_idx = index / categories; int cat_idx = index % categories; if (dest[dist_idx] == cat_idx) { d_presence_mask[index] = 1; } } // Only support n_sample = 1 void TorchMultinomialKernelLauncher(float* d_input, float* d_sampled, int32_t* d_output, int batch_size, int vocab_size, int* d_presence_mask, cudaStream_t stream) { // Store the props in class variables int device; CUDA_CALL_THROW(cudaGetDevice(&device)); cudaDeviceProp props; CUDA_CALL_THROW(cudaGetDeviceProperties(&props, device)); int numSM = props.multiProcessorCount; int maxThreads = props.maxThreadsPerBlock; int warp_size = 32; // at::cuda::warp_size(); int requiredWarps = (vocab_size + warp_size - 1) / warp_size; int requiredThreads = std::min(maxThreads, requiredWarps * warp_size); int requiredShared = requiredThreads * sizeof(float); dim3 block(requiredThreads); dim3 grid(std::min(batch_size, numSM * 4)); sampleMultinomialOnce<float, float> <<<grid, block, requiredShared, stream>>>(d_output, batch_size, vocab_size, d_sampled, d_input, vocab_size, 1, d_presence_mask); } __global__ void UpdateDecoderMaskedMultiHeadAttentionCacheIndirectionKernel(int32_t* tgt_indir_cache, const int32_t* src_indir_cache, const int32_t* beam_ids, int batch_size, int beam_width, int input_seq_length, int max_seq_length, int current_length) { int time_step = threadIdx.x + blockIdx.x * blockDim.x; int bb_id = threadIdx.y + blockIdx.y * blockDim.y; const int batch_id = bb_id / beam_width; const int beam_id = bb_id % beam_width; if (bb_id >= beam_width * batch_size || time_step >= current_length) { return; } const int src_beam = beam_ids[batch_id * beam_width + beam_id] % beam_width; const int tgt_offset = batch_id * beam_width * max_seq_length + beam_id * max_seq_length + time_step; if (time_step < input_seq_length) { // For time steps that correspond to the input sequence, // the beam that it comes from is always 0. tgt_indir_cache[tgt_offset] = static_cast<int32_t>(0); } else if (time_step == (current_length - 1)) { // For the final (newly generated) time step, // the beam that it comes from is always the beam that we // are currently processing (i.e.) from this point on, these time-steps // form the new beams. tgt_indir_cache[tgt_offset] = static_cast<int32_t>(beam_id); } else { // For all other time-steps, we look up the source indirection, to // see which beam it came from based on the `src_beam`. const int src_offset = batch_id * beam_width * max_seq_length + src_beam * max_seq_length + time_step; tgt_indir_cache[tgt_offset] = src_indir_cache[src_offset]; } } void UpdateDecoderMaskedMultiHeadAttentionCacheIndirection(int32_t* tgt_indir_cache, const int32_t* src_indir_cache, const int32_t* beam_ids, int batch_size, int beam_width, int input_seq_length, int max_seq_length, int current_length, cudaStream_t stream) { const dim3 block(32); const dim3 grid((current_length + block.x - 1) / block.x, batch_size * beam_width); UpdateDecoderMaskedMultiHeadAttentionCacheIndirectionKernel<<<grid, block, 0, stream>>>(tgt_indir_cache, src_indir_cache, beam_ids, batch_size, beam_width, input_seq_length, max_seq_length, current_length); } } // namespace cuda } // namespace contrib } // namespace onnxruntime
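The NoRepeatNGramLogitsProcessor branch in LogitsProcessKernel above is the densest part of that file. Below is a minimal CPU sketch of the same ban rule; the function name and plain-array interface are illustrative only and not part of ONNX Runtime. A candidate token is suppressed when appending it would reproduce an n-gram that already occurs in the generated sequence.

#include <cstdint>

// CPU sketch of the no-repeat-ngram check used in LogitsProcessKernel (illustrative only).
// Returns true if emitting `word_id` next would complete an n-gram already present in `seq`.
bool WouldRepeatNgram(const int32_t* seq, int seq_len, int32_t word_id, int ngram_size) {
  if (ngram_size <= 0 || seq_len < ngram_size) return false;
  // Scan every position where word_id could be the last token of an existing n-gram.
  for (int i = ngram_size - 1; i < seq_len; ++i) {
    if (seq[i] != word_id) continue;
    bool match = true;
    // Compare the n-1 tokens preceding that occurrence with the n-1 most recent tokens.
    for (int j = 0; j < ngram_size - 1; ++j) {
      if (seq[i - j - 1] != seq[seq_len - 1 - j]) { match = false; break; }
    }
    if (match) return true;  // the kernel sets the candidate's score to cub::FpLimits<T>::Lowest() here
  }
  return false;
}

In the kernel, a hit simply forces the candidate's score to the lowest representable value so it can never survive the subsequent top-k/top-p selection.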
c87a606c54d684e098732171a9cd38dac90991ae.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**************************************************************
File        : lcsGetStartOffsetInParticles.cu
Author      : Mingcheng Chen
Last Update : January 29th, 2013
***************************************************************/

#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE 1024

// Gathers every k-th element of "input" into the dense array "output".
__global__ void CollectEveryKElementKernel(int* input, int *output, int k, int length) {
    int globalID = blockDim.x * blockIdx.x + threadIdx.x;
    if (globalID < length) output[globalID] = input[globalID * k];
}

extern "C"
void CollectEveryKElement(int *input, int *output, int k, int length) {
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    dim3 dimGrid((length - 1) / dimBlock.x + 1, 1, 1);

    hipLaunchKernelGGL(( CollectEveryKElementKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, input, output, k, length);

    hipError_t err = hipDeviceSynchronize();
    if (err) {
        // Report the failure instead of silently discarding the error string,
        // and exit with a non-zero status.
        fprintf(stderr, "HIP error: %s\n", hipGetErrorString(err));
        exit(1);
    }
}
c87a606c54d684e098732171a9cd38dac90991ae.cu
/**************************************************************
File        : lcsGetStartOffsetInParticles.cu
Author      : Mingcheng Chen
Last Update : January 29th, 2013
***************************************************************/

#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE 1024

// Gathers every k-th element of "input" into the dense array "output".
__global__ void CollectEveryKElementKernel(int* input, int *output, int k, int length) {
    int globalID = blockDim.x * blockIdx.x + threadIdx.x;
    if (globalID < length) output[globalID] = input[globalID * k];
}

extern "C"
void CollectEveryKElement(int *input, int *output, int k, int length) {
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    dim3 dimGrid((length - 1) / dimBlock.x + 1, 1, 1);

    CollectEveryKElementKernel<<<dimGrid, dimBlock>>>(input, output, k, length);

    cudaError_t err = cudaDeviceSynchronize();
    if (err) {
        // Report the failure instead of silently discarding the error string,
        // and exit with a non-zero status.
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        exit(1);
    }
}
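A possible host-side caller of CollectEveryKElement is sketched below; the main() wrapper, buffer sizes, and stride are made up for illustration and are not part of the original source.

#include <cstdio>
#include <cuda_runtime.h>

extern "C" void CollectEveryKElement(int *input, int *output, int k, int length);

int main() {
  const int k = 4, length = 1000;              // hypothetical stride and output length
  int *h_in = new int[length * k];
  for (int i = 0; i < length * k; ++i) h_in[i] = i;

  int *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in, sizeof(int) * length * k);
  cudaMalloc(&d_out, sizeof(int) * length);
  cudaMemcpy(d_in, h_in, sizeof(int) * length * k, cudaMemcpyHostToDevice);

  CollectEveryKElement(d_in, d_out, k, length);   // gathers h_in[0], h_in[k], h_in[2k], ...

  int *h_out = new int[length];
  cudaMemcpy(h_out, d_out, sizeof(int) * length, cudaMemcpyDeviceToHost);
  printf("out[1] = %d (expected %d)\n", h_out[1], k);

  cudaFree(d_in); cudaFree(d_out);
  delete[] h_in; delete[] h_out;
  return 0;
}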
84c53f2c8fe0ca2bb96412e3572fb8fe38a1d2e8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "GPUHelpers.h" // Work around for the "IUnknown" error, excludes // some windows header which does not play well with cuda #ifdef _WIN32 #define WIN32_LEAN_AND_MEAN #endif #pragma warning(push) #pragma warning(disable:4996 4800 4610 4515 4512 4510 4458 4324 4201 4100) #include <hipcub/hipcub.hpp> #pragma warning(pop) using hipcub::BlockScan; using cub::BLOCK_SCAN_RAKING; namespace seba { namespace { const int DCT_SIZE = 8; const int DCT_SIZE2 = DCT_SIZE * DCT_SIZE; __constant__ uint32_t c_dc_luminance_huff_code[256]; __constant__ uint8_t c_dc_luminance_huff_size[256]; __constant__ uint32_t c_dc_chrominance_huff_code[256]; __constant__ uint8_t c_dc_chrominance_huff_size[256]; __constant__ uint32_t c_ac_luminance_huff_code[256]; __constant__ uint8_t c_ac_luminance_huff_size[256]; __constant__ uint32_t c_ac_chrominance_huff_code[256]; __constant__ uint8_t c_ac_chrominance_huff_size[256]; __device__ inline int NbBits(uint32_t i) { return 32 - __clz(i); } __device__ inline uint32_t BigEndian(uint32_t i) { return __byte_perm(i, i, 0b0000000100100011); } class StreamWriter { public: __host__ __device__ StreamWriter(uint32_t *bitstream, uint32_t bitOffset) : m_data((uint8_t *)bitstream), m_byteOffset(4 * (bitOffset >> 5)), m_bitOffset(bitOffset & 31), m_buffer(0) { } __device__ void WriteBits(uint16_t val, uint8_t nbits) { uint32_t bitsAvail = 32 - m_bitOffset; val = val & ((1 << nbits) - 1); if (nbits > bitsAvail) { // Remove the bits that won't fit in the current word nbits = nbits - bitsAvail; m_buffer |= val >> nbits; Flush(); bitsAvail = 32; } m_buffer |= val << (bitsAvail - nbits); m_bitOffset += nbits; // Note: the case where m_bitOffset == 32, will be handled the next call: bitsAvail will be 0 and we will go in the if true branch } __device__ void Flush() { atomicOr((uint32_t *)(m_data + m_byteOffset), BigEndian(m_buffer)); m_buffer = 0; m_byteOffset += 4; m_bitOffset = 0; } private: uint8_t* m_data; uint32_t m_byteOffset; uint32_t m_bitOffset; uint32_t m_buffer; }; } inline __device__ void SelectHuffmanTable(bool isLuminance, bool isDC, uint8_t *&huffSize, uint32_t *&huffCode, uint8_t *&acSize, uint32_t *&acCode) { // We select relevant huffman tables: we got different ones for luma/chroma as well as DC/AC if (isLuminance) { huffSize = acSize = c_ac_luminance_huff_size; huffCode = acCode = c_ac_luminance_huff_code; if (isDC) { huffSize = c_dc_luminance_huff_size; huffCode = c_dc_luminance_huff_code; } } else { huffSize = acSize = c_ac_chrominance_huff_size; huffCode = acCode = c_ac_chrominance_huff_code; if (isDC) { huffSize = c_dc_chrominance_huff_size; huffCode = c_dc_chrominance_huff_code; } } } void InitializeHuffmanTables( const uint8_t *dc_luminance_val_spec, const uint8_t *dc_luminance_bits_spec, const uint8_t *dc_chrominance_val_spec, const uint8_t *dc_chrominance_bits_spec, const uint8_t *ac_luminance_val_spec, const uint8_t *ac_luminance_bits_spec, const uint8_t *ac_chrominance_val_spec, const uint8_t *ac_chrominance_bits_spec ) { uint32_t ehufco[256]; uint8_t ehufsi[256]; DeriveHuffmanTable(dc_luminance_bits_spec, dc_luminance_val_spec, true, ehufco, ehufsi); hipMemcpyToSymbol(c_dc_luminance_huff_code, ehufco, sizeof(ehufco)); hipMemcpyToSymbol(c_dc_luminance_huff_size, ehufsi, sizeof(ehufsi)); DeriveHuffmanTable(dc_chrominance_bits_spec, dc_chrominance_val_spec, true, ehufco, ehufsi); hipMemcpyToSymbol(c_dc_chrominance_huff_code, ehufco, sizeof(ehufco)); 
hipMemcpyToSymbol(c_dc_chrominance_huff_size, ehufsi, sizeof(ehufsi)); DeriveHuffmanTable(ac_luminance_bits_spec, ac_luminance_val_spec, false, ehufco, ehufsi); hipMemcpyToSymbol(c_ac_luminance_huff_code, ehufco, sizeof(ehufco)); hipMemcpyToSymbol(c_ac_luminance_huff_size, ehufsi, sizeof(ehufsi)); DeriveHuffmanTable(ac_chrominance_bits_spec, ac_chrominance_val_spec, false, ehufco, ehufsi); hipMemcpyToSymbol(c_ac_chrominance_huff_code, ehufco, sizeof(ehufco)); hipMemcpyToSymbol(c_ac_chrominance_huff_size, ehufsi, sizeof(ehufsi)); } // That kernel is doing the zero run length as well as entropy coding of a DCT block. __global__ void rleDpcmHuff_gpu(const int16_t *dct, uint32_t *bitstream, uint8_t *huffmanBlockTail, uint32_t *huffmanBlockSize, int channel, sebaJpegFormat_t format, int ri) { int dctBlockId = gridDim.x * blockIdx.y + blockIdx.x; int dctOffset = dctBlockId * DCT_SIZE2 + threadIdx.x; // Note restartDpcm will be true for the first block (dctOffset == 0) __shared__ uint8_t rleRunOffsets[DCT_SIZE2]; // The first coeff of a block is the DC offset of the signal we decomposed against the 2D-cosine basis bool isDC = threadIdx.x == 0; bool isLuminance = channel == 0; bool restartDpcm = false; int mcuBlockId; switch (format) { case SEBA_JPEG_Y: mcuBlockId = dctBlockId; restartDpcm = 0 == (mcuBlockId % ri); break; case SEBA_JPEG_444: mcuBlockId = 3 * dctBlockId + channel; restartDpcm = 0 == (dctBlockId % ri); break; case SEBA_JPEG_422: if (isLuminance) { mcuBlockId = ((dctBlockId >> 1) << 2) + (dctBlockId & 1); restartDpcm = 0 == (mcuBlockId % (4 * ri)); } else { mcuBlockId = (dctBlockId << 2) + 1 + channel; restartDpcm = 0 == (dctBlockId % ri); } break; case SEBA_JPEG_420: { if (isLuminance) { int macroblockId = (gridDim.x >> 1) * (blockIdx.y >> 1) + (blockIdx.x >> 1); mcuBlockId = 6 * macroblockId + ((blockIdx.y & 1) << 1) + (blockIdx.x & 1); restartDpcm = 0 == (mcuBlockId % (6 * ri)); } else { mcuBlockId = 6 * dctBlockId + 3 + channel; restartDpcm = 0 == (dctBlockId % ri); } } break; } // 0 restart interval means no restart if (ri == 0) { restartDpcm = false; } int streamOffset = mcuBlockId * DCT_SIZE2; bitstream += streamOffset; // We select relevant huffman tables: we got different ones for luma/chroma as well as DC/AC uint8_t *huffSize; uint32_t *huffCode; uint8_t *acSize; uint32_t *acCode; SelectHuffmanTable(isLuminance, isDC, huffSize, huffCode, acSize, acCode); // Copy the block in shared mem in *zig-zag* *order*, the higher the zig-zag index of a coeff, the higher frequency the corresponding basis function, // the more likely it was quantized to zero int16_t sym = dct[dctOffset]; #ifdef SEBA_VIDEO_CUDA_DEBUG const int dbgBlock = 0, dbgChannel = 2; if (isDC && channel == dbgChannel && dctBlockId == dbgBlock) { for (int j = 0; j < DCT_SIZE2; ++j) { printf("%5d ", dct[dctBlockId * DCT_SIZE2 + j]); } printf("\n"); } #endif // Step 1: // zero run length encoding: we encode DC, and non zero ACs uint32_t isEncoded = isDC || sym; // We prefix sum a 2 warp long boolean array using voting operation and bit tricks int warp = threadIdx.x >> 5, lane = threadIdx.x & 31; uint32_t rleOffset = __popc(__ballot(isEncoded) & (((uint32_t)1 << (lane + 1)) - 1)); __shared__ uint32_t otherWarpOffset; if (31 == lane && 0 == warp) { otherWarpOffset = rleOffset; } __syncthreads(); if (1 == warp) { rleOffset += otherWarpOffset; } rleOffset -= isEncoded; // Compress zero runs if (isEncoded) { // We need to remember the offset of uncompressed runs in order to compute the zero run length 
rleRunOffsets[rleOffset] = threadIdx.x; } // DPCM of the DC coefficient *unless* we restart (restartDpcm is true) // Fetch the previous DC offset from global memory :-S if (isDC && !restartDpcm) { // Interestingly fast video always use a restart value of 8 MCUs when using 422 downsampling with 8x8 DCT blocks // (even if you set it to something different: we try to set it to 16 in fast_video_helper.cpp) // An MCU is worth 16x8 pixels in that case. Could it be that a thread block handles 8 MCUs to avoid fetching the // previous DC coefficients from global memory ? Investigate int prevOffset = dctOffset - DCT_SIZE2; if (format == SEBA_JPEG_420 && isLuminance) { prevOffset = blockIdx.x & 1 ? dctOffset - DCT_SIZE2 : (blockIdx.y & 1 ? (gridDim.x * (blockIdx.y - 1) + blockIdx.x + 1) * DCT_SIZE2 : (blockIdx.x > 0 ? (gridDim.x * (blockIdx.y + 1) + blockIdx.x - 1) * DCT_SIZE2 : dctOffset - DCT_SIZE2)); } sym -= dct[prevOffset]; } // The number of encoded symbols is given by the last value of an *inclusive* scan. // Since we've done an exclusive one to get 0 based offsets, we need to compute it __shared__ int nEncodedSymbols; if (threadIdx.x == DCT_SIZE2 - 1) { nEncodedSymbols = rleOffset + isEncoded; } __syncthreads(); bool isLast = rleOffset == nEncodedSymbols - 1; bool blockHasTrailingZeros = rleRunOffsets[nEncodedSymbols - 1] != DCT_SIZE2 - 1; // Step 2: // simulate huffman variable length encoding uint32_t huffmanSize[1]; huffmanSize[0] = 0; int16_t sym2; uint8_t len; if (isEncoded) { /* This code assumes we are on a two's complement machine */ sym2 = sym; if (sym < 0) { sym = -sym; --sym2; } // Number of zeros before a non zero AC or the (eventually zero) DPCM'ed DC len = isDC ? 0 : threadIdx.x - rleRunOffsets[rleOffset - 1] - 1; uint32_t nbits = (len >> 4) * huffSize[0xF0]; uint8_t magnitudeBits = NbBits(sym); // It is important to 0xF mask as the bitshift operation argument will // be implicitely casted to unsigned int nbits += huffSize[((len & 0xF) << 4) + magnitudeBits]; nbits += magnitudeBits; huffmanSize[0] = nbits; // If the last coef(s) were zero, emit an end-of-block code if (isLast && blockHasTrailingZeros) { huffmanSize[0] += acSize[0]; } } #ifdef SEBA_VIDEO_CUDA_DEBUG if (isDC && channel == dbgChannel && dctBlockId == dbgBlock) { for (int i = 0; i < nEncodedSymbols; ++i) { int l = !i ? 0 : rleRunOffsets[i] - rleRunOffsets[i - 1] - 1; printf("%5d", dct[dctBlockId * DCT_SIZE2 + rleRunOffsets[i]]); if (l) { printf("(%d)", l); } printf(" ", l); } printf("\n"); } #endif // and we sync here again cause the boys are going to prefix sum huffmanSizes // so that a warp might not have saved lastHuffSize before it starts typedef BlockScan<uint32_t, DCT_SIZE2> BlockScan; __shared__ typename BlockScan::TempStorage temp_storage; uint32_t huffmanOffset[1]; BlockScan(temp_storage).ExclusiveSum(huffmanSize, huffmanOffset); bitstream[threadIdx.x] = 0; __syncthreads(); #ifdef SEBA_VIDEO_CUDA_DEBUG if (isDC && channel == dbgChannel && dctBlockId == dbgBlock) { for (int i = 0; i < nEncodedSymbols; ++i) { uint8_t *thuffSize; uint32_t *thuffCode; uint8_t *tacSize; uint32_t *tacCode; SelectHuffmanTable(isLuminance, i == 0, thuffSize, thuffCode, tacSize, tacCode); /* This code assumes we are on a two's complement machine */ int16_t sym = dct[dctBlockId * DCT_SIZE2 + rleRunOffsets[i]], sym2 = sym; if (sym < 0) { sym = -sym; --sym2; } uint8_t len = i == 0 ? 
0 : rleRunOffsets[i] - rleRunOffsets[i - 1] - 1; while (len >= 16) { printf("%u %u\n", thuffCode[0xF0], thuffSize[0xF0]); len -= 16; } /* Find the number of bits needed for the magnitude of the coefficient */ /* there must be at least one 1 bit */ uint8_t nbits = NbBits(sym); /* Emit Huffman symbol for run length / number of bits */ int x = (len << 4) + nbits; printf("%u %u\n", thuffCode[x], thuffSize[x]); /* Emit that number of bits of the value, if positive, */ /* or the complement of its magnitude, if negative. */ printf("%u %u\n", (uint32_t)sym2, nbits); if (i == nEncodedSymbols - 1 && blockHasTrailingZeros) { printf("%u %u\n", tacCode[0], tacSize[0]); } } } #endif if (isEncoded) { StreamWriter writer(bitstream, huffmanOffset[0]); #pragma unroll 4 while (len >= 16) { writer.WriteBits(huffCode[0xF0], huffSize[0xF0]); len -= 16; } /* Find the number of bits needed for the magnitude of the coefficient */ /* there must be at least one 1 bit */ uint8_t nbits = NbBits(sym); /* Emit Huffman symbol for run length / number of bits */ int i = (len << 4) + nbits; writer.WriteBits(huffCode[i], huffSize[i]); /* Emit that number of bits of the value, if positive, */ /* or the complement of its magnitude, if negative. */ writer.WriteBits((uint32_t)sym2, nbits); // This has to to use AC tables even if the DC thread is writting the end of block if (isLast && blockHasTrailingZeros) { writer.WriteBits(acCode[0], acSize[0]); } writer.Flush(); } __syncthreads(); if (threadIdx.x == DCT_SIZE2 - 1) { // in order to get the entropy coded size of the block in bits uint32_t total = huffmanOffset[0] + huffmanSize[0]; // thread 0 is writting the *per* *block* information huffmanBlockSize[mcuBlockId] = total; int lastIdx = (uint16_t)ceil((float)total / 8) - 1; uint8_t tail = *((uint8_t *)bitstream + lastIdx); uint8_t secondToTail = lastIdx > 0 ? *((uint8_t *)bitstream + lastIdx - 1) : 0; int extraBits = total & 7; int toShift = (8 - extraBits) & 7; huffmanBlockTail[mcuBlockId] = secondToTail << (8 - toShift) | (tail >> toShift); } } // The idea here is to exchange data between adjacent entropy coded blocks in order to get eveything nicely byte aligned. // For this to work an encoded block must be at least a byte long, which is the case because a DC huffword is at least 3 bits // + the AC empty block huffword is 5 bits long, so it works __global__ void alignBitstream_gpu(uint8_t *bitstream, uint8_t *huffmanBlockTail, uint32_t *huffmanBlockSize, uint32_t *huffmanBlockOffset, int ri) { int byteOffset = threadIdx.x; int mcuBlockId = blockIdx.x; // We need to compute the ID of the last "restart" MCU: int restartMcuBlockId = (mcuBlockId / ri) * ri; if (ri == 0) { restartMcuBlockId = 0; } bool markRestart = mcuBlockId == restartMcuBlockId + ri - 1; //0 == (mcuBlockId + 1) % ri int streamOffset = mcuBlockId * DCT_SIZE2 * sizeof(uint32_t); bitstream += streamOffset; // Here is the trick: we prefixed sum huffman coded block sizes without taking into account restart markers. // Offset must be relative to the last "restart" MCU. Hence uint32_t offset = huffmanBlockOffset[mcuBlockId] - huffmanBlockOffset[restartMcuBlockId]; // In particular when mucBlockId is a restart MCU (restartDpcm is true) offset/toShift is 0. int toShift = offset & 0x7; uint32_t bitSize = huffmanBlockSize[mcuBlockId] + toShift; // We account for our tail being stollen. // The tail of the last block can't be stollen it's padded with 0. 
// Likewise the tail of a block prior a restart is not stollen, it is padded with 1s if (mcuBlockId < gridDim.x - 1 && !markRestart) { bitSize &= ~7; } uint32_t byteSize = (uint16_t)ceil((float)bitSize / 8); uint32_t escapedLength[1], escapedOffset[1]; escapedLength[0] = 0; uint8_t myByte = 0; if (byteOffset < byteSize) { myByte = bitstream[byteOffset]; if (toShift) { uint8_t prevByte = byteOffset == 0 ? huffmanBlockTail[mcuBlockId - 1] : bitstream[byteOffset - 1]; myByte = prevByte << (8 - toShift) | (myByte >> toShift); } if (markRestart && byteOffset == byteSize - 1) { // If we're going to mark a restart, we need to pad with ones int nones = (8 - (bitSize & 7)) & 7; myByte |= (1 << nones) - 1; // Double check that one } escapedLength[0] = myByte == 0xFF ? 2 : 1; } // Restart interval: we've padded with ones at that stage, now we need to insert the marker (2 bytes) typedef BlockScan<uint32_t, 2 * DCT_SIZE2> BlockScan; __shared__ typename BlockScan::TempStorage temp_storage; BlockScan(temp_storage).ExclusiveSum(escapedLength, escapedOffset); if (byteOffset < byteSize) { int bSize = escapedOffset[0] + escapedLength[0]; bitstream[bSize - 1] = 0; bitstream[escapedOffset[0]] = myByte; if (byteOffset == byteSize - 1) { if (markRestart) { bitstream[bSize] = 0xFF; bitstream[bSize + 1] = 0xD0 | ((mcuBlockId / ri) & 0x7); bSize += 2; } huffmanBlockSize[mcuBlockId] = bSize; } } } __global__ void concatBitstream_gpu(const uint32_t *src, const uint32_t *huffmanBlockSize, uint32_t *huffmanBlockOffset, uint8_t *dst, uint32_t *bytestreamSize) { __shared__ uint32_t block[DCT_SIZE2]; block[threadIdx.x] = src[blockIdx.x * DCT_SIZE2 + threadIdx.x]; __syncthreads(); uint32_t offset = huffmanBlockOffset[blockIdx.x]; dst += offset + threadIdx.x; uint32_t size = huffmanBlockSize[blockIdx.x]; #pragma unroll 4 for (uint32_t i = threadIdx.x; i < size; i += blockDim.x, dst += blockDim.x) { *dst = ((uint8_t *)block)[i]; } if (blockIdx.x == gridDim.x - 1 && threadIdx.x == 0) { *bytestreamSize = offset + size; } } }
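The alignBitstream_gpu kernel above combines two JPEG bitstream rules: any 0xFF data byte must be followed by a stuffed 0x00, and the end of a restart interval is marked with an RSTn marker (0xFFD0 through 0xFFD7, cycling modulo 8). A serial CPU sketch of that escaping step, with hypothetical buffer names, looks like this:

#include <cstdint>
#include <vector>

// CPU sketch of JPEG byte stuffing plus restart markers (illustrative only; it does not
// reproduce the kernel's in-place prefix-sum layout).
std::vector<uint8_t> EscapeMcuBytes(const std::vector<uint8_t>& raw, bool mark_restart, int restart_index) {
  std::vector<uint8_t> out;
  for (uint8_t b : raw) {
    out.push_back(b);
    if (b == 0xFF) out.push_back(0x00);         // stuff a zero so 0xFF is never parsed as a marker
  }
  if (mark_restart) {
    out.push_back(0xFF);
    out.push_back(0xD0 | (restart_index & 0x7));  // RST0..RST7, as in the kernel's 0xD0 | (... & 0x7)
  }
  return out;
}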
84c53f2c8fe0ca2bb96412e3572fb8fe38a1d2e8.cu
#include "GPUHelpers.h" // Work around for the "IUnknown" error, excludes // some windows header which does not play well with cuda #ifdef _WIN32 #define WIN32_LEAN_AND_MEAN #endif #pragma warning(push) #pragma warning(disable:4996 4800 4610 4515 4512 4510 4458 4324 4201 4100) #include <cub/cub.cuh> #pragma warning(pop) using cub::BlockScan; using cub::BLOCK_SCAN_RAKING; namespace seba { namespace { const int DCT_SIZE = 8; const int DCT_SIZE2 = DCT_SIZE * DCT_SIZE; __constant__ uint32_t c_dc_luminance_huff_code[256]; __constant__ uint8_t c_dc_luminance_huff_size[256]; __constant__ uint32_t c_dc_chrominance_huff_code[256]; __constant__ uint8_t c_dc_chrominance_huff_size[256]; __constant__ uint32_t c_ac_luminance_huff_code[256]; __constant__ uint8_t c_ac_luminance_huff_size[256]; __constant__ uint32_t c_ac_chrominance_huff_code[256]; __constant__ uint8_t c_ac_chrominance_huff_size[256]; __device__ inline int NbBits(uint32_t i) { return 32 - __clz(i); } __device__ inline uint32_t BigEndian(uint32_t i) { return __byte_perm(i, i, 0b0000000100100011); } class StreamWriter { public: __host__ __device__ StreamWriter(uint32_t *bitstream, uint32_t bitOffset) : m_data((uint8_t *)bitstream), m_byteOffset(4 * (bitOffset >> 5)), m_bitOffset(bitOffset & 31), m_buffer(0) { } __device__ void WriteBits(uint16_t val, uint8_t nbits) { uint32_t bitsAvail = 32 - m_bitOffset; val = val & ((1 << nbits) - 1); if (nbits > bitsAvail) { // Remove the bits that won't fit in the current word nbits = nbits - bitsAvail; m_buffer |= val >> nbits; Flush(); bitsAvail = 32; } m_buffer |= val << (bitsAvail - nbits); m_bitOffset += nbits; // Note: the case where m_bitOffset == 32, will be handled the next call: bitsAvail will be 0 and we will go in the if true branch } __device__ void Flush() { atomicOr((uint32_t *)(m_data + m_byteOffset), BigEndian(m_buffer)); m_buffer = 0; m_byteOffset += 4; m_bitOffset = 0; } private: uint8_t* m_data; uint32_t m_byteOffset; uint32_t m_bitOffset; uint32_t m_buffer; }; } inline __device__ void SelectHuffmanTable(bool isLuminance, bool isDC, uint8_t *&huffSize, uint32_t *&huffCode, uint8_t *&acSize, uint32_t *&acCode) { // We select relevant huffman tables: we got different ones for luma/chroma as well as DC/AC if (isLuminance) { huffSize = acSize = c_ac_luminance_huff_size; huffCode = acCode = c_ac_luminance_huff_code; if (isDC) { huffSize = c_dc_luminance_huff_size; huffCode = c_dc_luminance_huff_code; } } else { huffSize = acSize = c_ac_chrominance_huff_size; huffCode = acCode = c_ac_chrominance_huff_code; if (isDC) { huffSize = c_dc_chrominance_huff_size; huffCode = c_dc_chrominance_huff_code; } } } void InitializeHuffmanTables( const uint8_t *dc_luminance_val_spec, const uint8_t *dc_luminance_bits_spec, const uint8_t *dc_chrominance_val_spec, const uint8_t *dc_chrominance_bits_spec, const uint8_t *ac_luminance_val_spec, const uint8_t *ac_luminance_bits_spec, const uint8_t *ac_chrominance_val_spec, const uint8_t *ac_chrominance_bits_spec ) { uint32_t ehufco[256]; uint8_t ehufsi[256]; DeriveHuffmanTable(dc_luminance_bits_spec, dc_luminance_val_spec, true, ehufco, ehufsi); cudaMemcpyToSymbol(c_dc_luminance_huff_code, ehufco, sizeof(ehufco)); cudaMemcpyToSymbol(c_dc_luminance_huff_size, ehufsi, sizeof(ehufsi)); DeriveHuffmanTable(dc_chrominance_bits_spec, dc_chrominance_val_spec, true, ehufco, ehufsi); cudaMemcpyToSymbol(c_dc_chrominance_huff_code, ehufco, sizeof(ehufco)); cudaMemcpyToSymbol(c_dc_chrominance_huff_size, ehufsi, sizeof(ehufsi)); DeriveHuffmanTable(ac_luminance_bits_spec, 
ac_luminance_val_spec, false, ehufco, ehufsi); cudaMemcpyToSymbol(c_ac_luminance_huff_code, ehufco, sizeof(ehufco)); cudaMemcpyToSymbol(c_ac_luminance_huff_size, ehufsi, sizeof(ehufsi)); DeriveHuffmanTable(ac_chrominance_bits_spec, ac_chrominance_val_spec, false, ehufco, ehufsi); cudaMemcpyToSymbol(c_ac_chrominance_huff_code, ehufco, sizeof(ehufco)); cudaMemcpyToSymbol(c_ac_chrominance_huff_size, ehufsi, sizeof(ehufsi)); } // That kernel is doing the zero run length as well as entropy coding of a DCT block. __global__ void rleDpcmHuff_gpu(const int16_t *dct, uint32_t *bitstream, uint8_t *huffmanBlockTail, uint32_t *huffmanBlockSize, int channel, sebaJpegFormat_t format, int ri) { int dctBlockId = gridDim.x * blockIdx.y + blockIdx.x; int dctOffset = dctBlockId * DCT_SIZE2 + threadIdx.x; // Note restartDpcm will be true for the first block (dctOffset == 0) __shared__ uint8_t rleRunOffsets[DCT_SIZE2]; // The first coeff of a block is the DC offset of the signal we decomposed against the 2D-cosine basis bool isDC = threadIdx.x == 0; bool isLuminance = channel == 0; bool restartDpcm = false; int mcuBlockId; switch (format) { case SEBA_JPEG_Y: mcuBlockId = dctBlockId; restartDpcm = 0 == (mcuBlockId % ri); break; case SEBA_JPEG_444: mcuBlockId = 3 * dctBlockId + channel; restartDpcm = 0 == (dctBlockId % ri); break; case SEBA_JPEG_422: if (isLuminance) { mcuBlockId = ((dctBlockId >> 1) << 2) + (dctBlockId & 1); restartDpcm = 0 == (mcuBlockId % (4 * ri)); } else { mcuBlockId = (dctBlockId << 2) + 1 + channel; restartDpcm = 0 == (dctBlockId % ri); } break; case SEBA_JPEG_420: { if (isLuminance) { int macroblockId = (gridDim.x >> 1) * (blockIdx.y >> 1) + (blockIdx.x >> 1); mcuBlockId = 6 * macroblockId + ((blockIdx.y & 1) << 1) + (blockIdx.x & 1); restartDpcm = 0 == (mcuBlockId % (6 * ri)); } else { mcuBlockId = 6 * dctBlockId + 3 + channel; restartDpcm = 0 == (dctBlockId % ri); } } break; } // 0 restart interval means no restart if (ri == 0) { restartDpcm = false; } int streamOffset = mcuBlockId * DCT_SIZE2; bitstream += streamOffset; // We select relevant huffman tables: we got different ones for luma/chroma as well as DC/AC uint8_t *huffSize; uint32_t *huffCode; uint8_t *acSize; uint32_t *acCode; SelectHuffmanTable(isLuminance, isDC, huffSize, huffCode, acSize, acCode); // Copy the block in shared mem in *zig-zag* *order*, the higher the zig-zag index of a coeff, the higher frequency the corresponding basis function, // the more likely it was quantized to zero int16_t sym = dct[dctOffset]; #ifdef SEBA_VIDEO_CUDA_DEBUG const int dbgBlock = 0, dbgChannel = 2; if (isDC && channel == dbgChannel && dctBlockId == dbgBlock) { for (int j = 0; j < DCT_SIZE2; ++j) { printf("%5d ", dct[dctBlockId * DCT_SIZE2 + j]); } printf("\n"); } #endif // Step 1: // zero run length encoding: we encode DC, and non zero ACs uint32_t isEncoded = isDC || sym; // We prefix sum a 2 warp long boolean array using voting operation and bit tricks int warp = threadIdx.x >> 5, lane = threadIdx.x & 31; uint32_t rleOffset = __popc(__ballot(isEncoded) & (((uint32_t)1 << (lane + 1)) - 1)); __shared__ uint32_t otherWarpOffset; if (31 == lane && 0 == warp) { otherWarpOffset = rleOffset; } __syncthreads(); if (1 == warp) { rleOffset += otherWarpOffset; } rleOffset -= isEncoded; // Compress zero runs if (isEncoded) { // We need to remember the offset of uncompressed runs in order to compute the zero run length rleRunOffsets[rleOffset] = threadIdx.x; } // DPCM of the DC coefficient *unless* we restart (restartDpcm is true) // Fetch the 
previous DC offset from global memory :-S if (isDC && !restartDpcm) { // Interestingly fast video always use a restart value of 8 MCUs when using 422 downsampling with 8x8 DCT blocks // (even if you set it to something different: we try to set it to 16 in fast_video_helper.cpp) // An MCU is worth 16x8 pixels in that case. Could it be that a thread block handles 8 MCUs to avoid fetching the // previous DC coefficients from global memory ? Investigate int prevOffset = dctOffset - DCT_SIZE2; if (format == SEBA_JPEG_420 && isLuminance) { prevOffset = blockIdx.x & 1 ? dctOffset - DCT_SIZE2 : (blockIdx.y & 1 ? (gridDim.x * (blockIdx.y - 1) + blockIdx.x + 1) * DCT_SIZE2 : (blockIdx.x > 0 ? (gridDim.x * (blockIdx.y + 1) + blockIdx.x - 1) * DCT_SIZE2 : dctOffset - DCT_SIZE2)); } sym -= dct[prevOffset]; } // The number of encoded symbols is given by the last value of an *inclusive* scan. // Since we've done an exclusive one to get 0 based offsets, we need to compute it __shared__ int nEncodedSymbols; if (threadIdx.x == DCT_SIZE2 - 1) { nEncodedSymbols = rleOffset + isEncoded; } __syncthreads(); bool isLast = rleOffset == nEncodedSymbols - 1; bool blockHasTrailingZeros = rleRunOffsets[nEncodedSymbols - 1] != DCT_SIZE2 - 1; // Step 2: // simulate huffman variable length encoding uint32_t huffmanSize[1]; huffmanSize[0] = 0; int16_t sym2; uint8_t len; if (isEncoded) { /* This code assumes we are on a two's complement machine */ sym2 = sym; if (sym < 0) { sym = -sym; --sym2; } // Number of zeros before a non zero AC or the (eventually zero) DPCM'ed DC len = isDC ? 0 : threadIdx.x - rleRunOffsets[rleOffset - 1] - 1; uint32_t nbits = (len >> 4) * huffSize[0xF0]; uint8_t magnitudeBits = NbBits(sym); // It is important to 0xF mask as the bitshift operation argument will // be implicitely casted to unsigned int nbits += huffSize[((len & 0xF) << 4) + magnitudeBits]; nbits += magnitudeBits; huffmanSize[0] = nbits; // If the last coef(s) were zero, emit an end-of-block code if (isLast && blockHasTrailingZeros) { huffmanSize[0] += acSize[0]; } } #ifdef SEBA_VIDEO_CUDA_DEBUG if (isDC && channel == dbgChannel && dctBlockId == dbgBlock) { for (int i = 0; i < nEncodedSymbols; ++i) { int l = !i ? 0 : rleRunOffsets[i] - rleRunOffsets[i - 1] - 1; printf("%5d", dct[dctBlockId * DCT_SIZE2 + rleRunOffsets[i]]); if (l) { printf("(%d)", l); } printf(" ", l); } printf("\n"); } #endif // and we sync here again cause the boys are going to prefix sum huffmanSizes // so that a warp might not have saved lastHuffSize before it starts typedef BlockScan<uint32_t, DCT_SIZE2> BlockScan; __shared__ typename BlockScan::TempStorage temp_storage; uint32_t huffmanOffset[1]; BlockScan(temp_storage).ExclusiveSum(huffmanSize, huffmanOffset); bitstream[threadIdx.x] = 0; __syncthreads(); #ifdef SEBA_VIDEO_CUDA_DEBUG if (isDC && channel == dbgChannel && dctBlockId == dbgBlock) { for (int i = 0; i < nEncodedSymbols; ++i) { uint8_t *thuffSize; uint32_t *thuffCode; uint8_t *tacSize; uint32_t *tacCode; SelectHuffmanTable(isLuminance, i == 0, thuffSize, thuffCode, tacSize, tacCode); /* This code assumes we are on a two's complement machine */ int16_t sym = dct[dctBlockId * DCT_SIZE2 + rleRunOffsets[i]], sym2 = sym; if (sym < 0) { sym = -sym; --sym2; } uint8_t len = i == 0 ? 
0 : rleRunOffsets[i] - rleRunOffsets[i - 1] - 1; while (len >= 16) { printf("%u %u\n", thuffCode[0xF0], thuffSize[0xF0]); len -= 16; } /* Find the number of bits needed for the magnitude of the coefficient */ /* there must be at least one 1 bit */ uint8_t nbits = NbBits(sym); /* Emit Huffman symbol for run length / number of bits */ int x = (len << 4) + nbits; printf("%u %u\n", thuffCode[x], thuffSize[x]); /* Emit that number of bits of the value, if positive, */ /* or the complement of its magnitude, if negative. */ printf("%u %u\n", (uint32_t)sym2, nbits); if (i == nEncodedSymbols - 1 && blockHasTrailingZeros) { printf("%u %u\n", tacCode[0], tacSize[0]); } } } #endif if (isEncoded) { StreamWriter writer(bitstream, huffmanOffset[0]); #pragma unroll 4 while (len >= 16) { writer.WriteBits(huffCode[0xF0], huffSize[0xF0]); len -= 16; } /* Find the number of bits needed for the magnitude of the coefficient */ /* there must be at least one 1 bit */ uint8_t nbits = NbBits(sym); /* Emit Huffman symbol for run length / number of bits */ int i = (len << 4) + nbits; writer.WriteBits(huffCode[i], huffSize[i]); /* Emit that number of bits of the value, if positive, */ /* or the complement of its magnitude, if negative. */ writer.WriteBits((uint32_t)sym2, nbits); // This has to to use AC tables even if the DC thread is writting the end of block if (isLast && blockHasTrailingZeros) { writer.WriteBits(acCode[0], acSize[0]); } writer.Flush(); } __syncthreads(); if (threadIdx.x == DCT_SIZE2 - 1) { // in order to get the entropy coded size of the block in bits uint32_t total = huffmanOffset[0] + huffmanSize[0]; // thread 0 is writting the *per* *block* information huffmanBlockSize[mcuBlockId] = total; int lastIdx = (uint16_t)ceil((float)total / 8) - 1; uint8_t tail = *((uint8_t *)bitstream + lastIdx); uint8_t secondToTail = lastIdx > 0 ? *((uint8_t *)bitstream + lastIdx - 1) : 0; int extraBits = total & 7; int toShift = (8 - extraBits) & 7; huffmanBlockTail[mcuBlockId] = secondToTail << (8 - toShift) | (tail >> toShift); } } // The idea here is to exchange data between adjacent entropy coded blocks in order to get eveything nicely byte aligned. // For this to work an encoded block must be at least a byte long, which is the case because a DC huffword is at least 3 bits // + the AC empty block huffword is 5 bits long, so it works __global__ void alignBitstream_gpu(uint8_t *bitstream, uint8_t *huffmanBlockTail, uint32_t *huffmanBlockSize, uint32_t *huffmanBlockOffset, int ri) { int byteOffset = threadIdx.x; int mcuBlockId = blockIdx.x; // We need to compute the ID of the last "restart" MCU: int restartMcuBlockId = (mcuBlockId / ri) * ri; if (ri == 0) { restartMcuBlockId = 0; } bool markRestart = mcuBlockId == restartMcuBlockId + ri - 1; //0 == (mcuBlockId + 1) % ri int streamOffset = mcuBlockId * DCT_SIZE2 * sizeof(uint32_t); bitstream += streamOffset; // Here is the trick: we prefixed sum huffman coded block sizes without taking into account restart markers. // Offset must be relative to the last "restart" MCU. Hence uint32_t offset = huffmanBlockOffset[mcuBlockId] - huffmanBlockOffset[restartMcuBlockId]; // In particular when mucBlockId is a restart MCU (restartDpcm is true) offset/toShift is 0. int toShift = offset & 0x7; uint32_t bitSize = huffmanBlockSize[mcuBlockId] + toShift; // We account for our tail being stollen. // The tail of the last block can't be stollen it's padded with 0. 
// Likewise the tail of a block prior a restart is not stollen, it is padded with 1s if (mcuBlockId < gridDim.x - 1 && !markRestart) { bitSize &= ~7; } uint32_t byteSize = (uint16_t)ceil((float)bitSize / 8); uint32_t escapedLength[1], escapedOffset[1]; escapedLength[0] = 0; uint8_t myByte = 0; if (byteOffset < byteSize) { myByte = bitstream[byteOffset]; if (toShift) { uint8_t prevByte = byteOffset == 0 ? huffmanBlockTail[mcuBlockId - 1] : bitstream[byteOffset - 1]; myByte = prevByte << (8 - toShift) | (myByte >> toShift); } if (markRestart && byteOffset == byteSize - 1) { // If we're going to mark a restart, we need to pad with ones int nones = (8 - (bitSize & 7)) & 7; myByte |= (1 << nones) - 1; // Double check that one } escapedLength[0] = myByte == 0xFF ? 2 : 1; } // Restart interval: we've padded with ones at that stage, now we need to insert the marker (2 bytes) typedef BlockScan<uint32_t, 2 * DCT_SIZE2> BlockScan; __shared__ typename BlockScan::TempStorage temp_storage; BlockScan(temp_storage).ExclusiveSum(escapedLength, escapedOffset); if (byteOffset < byteSize) { int bSize = escapedOffset[0] + escapedLength[0]; bitstream[bSize - 1] = 0; bitstream[escapedOffset[0]] = myByte; if (byteOffset == byteSize - 1) { if (markRestart) { bitstream[bSize] = 0xFF; bitstream[bSize + 1] = 0xD0 | ((mcuBlockId / ri) & 0x7); bSize += 2; } huffmanBlockSize[mcuBlockId] = bSize; } } } __global__ void concatBitstream_gpu(const uint32_t *src, const uint32_t *huffmanBlockSize, uint32_t *huffmanBlockOffset, uint8_t *dst, uint32_t *bytestreamSize) { __shared__ uint32_t block[DCT_SIZE2]; block[threadIdx.x] = src[blockIdx.x * DCT_SIZE2 + threadIdx.x]; __syncthreads(); uint32_t offset = huffmanBlockOffset[blockIdx.x]; dst += offset + threadIdx.x; uint32_t size = huffmanBlockSize[blockIdx.x]; #pragma unroll 4 for (uint32_t i = threadIdx.x; i < size; i += blockDim.x, dst += blockDim.x) { *dst = ((uint8_t *)block)[i]; } if (blockIdx.x == gridDim.x - 1 && threadIdx.x == 0) { *bytestreamSize = offset + size; } } }
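The per-coefficient bit counting in rleDpcmHuff_gpu follows the standard JPEG recipe: runs of more than 15 zeros are split into ZRL (0xF0) symbols, the remaining (run, magnitude-category) pair indexes the Huffman size table, and the coefficient value itself contributes `category` extra bits (negatives are written via the `--sym2` ones'-complement trick). A scalar sketch of the size computation, assuming the same 256-entry size-table layout, is:

#include <cstdint>

// Number of bits needed for the magnitude of a non-negative coefficient (the JPEG "category").
static int MagnitudeBits(uint32_t v) {
  int n = 0;
  while (v) { ++n; v >>= 1; }
  return n;
}

// CPU sketch of the bit count produced for one (zero_run, coefficient) pair.
// huff_size is indexed by (run << 4) | category, like the constant tables in the kernel.
int CoefficientBitCount(int zero_run, int16_t coeff, const uint8_t huff_size[256]) {
  uint32_t mag = coeff < 0 ? (uint32_t)(-coeff) : (uint32_t)coeff;
  int category = MagnitudeBits(mag);
  int bits = (zero_run >> 4) * huff_size[0xF0];            // one ZRL symbol per full run of 16 zeros
  bits += huff_size[((zero_run & 0xF) << 4) + category];   // Huffman code for (run, category)
  bits += category;                                        // the magnitude bits themselves
  return bits;
}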
e07a8e9333d7ec740a2871e3c5256ac94a9d5a20.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "gtest/gtest.h" #include "paddle/platform/device_context.h" #include "glog/logging.h" TEST(Device, Init) { using paddle::platform::DeviceContext; using paddle::platform::CUDADeviceContext; using paddle::platform::GPUPlace; int count = paddle::platform::GetCUDADeviceCount(); for (int i = 0; i < count; i++) { CUDADeviceContext* device_context = new CUDADeviceContext(GPUPlace(i)); Eigen::GpuDevice* gpu_device = device_context->eigen_device(); ASSERT_NE(nullptr, gpu_device); delete device_context; } } TEST(Device, CUDADeviceContext) { using paddle::platform::CUDADeviceContext; using paddle::platform::GPUPlace; int count = paddle::platform::GetCUDADeviceCount(); for (int i = 0; i < count; i++) { CUDADeviceContext* device_context = new CUDADeviceContext(GPUPlace(i)); Eigen::GpuDevice* gpu_device = device_context->eigen_device(); ASSERT_NE(nullptr, gpu_device); cudnnHandle_t cudnn_handle = device_context->cudnn_handle(); ASSERT_NE(nullptr, cudnn_handle); hipblasHandle_t cublas_handle = device_context->cublas_handle(); ASSERT_NE(nullptr, cublas_handle); ASSERT_NE(nullptr, device_context->stream()); delete device_context; } } TEST(Device, CUDNNDeviceContext) { using paddle::platform::CUDNNDeviceContext; using paddle::platform::CUDNNPlace; if (paddle::platform::dynload::HasCUDNN()) { int count = paddle::platform::GetCUDADeviceCount(); for (int i = 0; i < count; ++i) { CUDNNDeviceContext* device_context = new CUDNNDeviceContext(CUDNNPlace(i)); cudnnHandle_t cudnn_handle = device_context->cudnn_handle(); ASSERT_NE(nullptr, cudnn_handle); ASSERT_NE(nullptr, device_context->stream()); delete device_context; } } } TEST(Device, DeviceContextPool) { using paddle::platform::DeviceContextPool; using paddle::platform::CUDADeviceContext; using paddle::platform::Place; using paddle::platform::CPUPlace; using paddle::platform::GPUPlace; DeviceContextPool& pool = DeviceContextPool::Get(); auto cpu_dev_ctx1 = pool.Borrow(CPUPlace()); auto cpu_dev_ctx2 = pool.Borrow(CPUPlace()); EXPECT_TRUE(cpu_dev_ctx2 == cpu_dev_ctx1); std::vector<Place> gpu_places; int count = paddle::platform::GetCUDADeviceCount(); for (int i = 0; i < count; ++i) { gpu_places.emplace_back(GPUPlace(i)); } auto dev_ctxs = pool.Borrow(gpu_places); for (size_t i = 0; i < dev_ctxs.size(); ++i) { auto* dev_ctx = static_cast<const CUDADeviceContext*>(dev_ctxs[i]); // check same as GPUPlace(i) GPUPlace place = boost::get<GPUPlace>(dev_ctx->GetPlace()); EXPECT_EQ(place.GetDeviceId(), static_cast<int>(i)); } } int main(int argc, char** argv) { int dev_count = paddle::platform::GetCUDADeviceCount(); if (dev_count <= 1) { LOG(WARNING) << "Cannot test multi-gpu DeviceContextPool, because the CUDA " "device count is " << dev_count; return 0; } std::vector<paddle::platform::Place> places; places.emplace_back(paddle::platform::CPUPlace()); int count = paddle::platform::GetCUDADeviceCount(); for (int i = 0; i < count; 
++i) { places.emplace_back(paddle::platform::GPUPlace(i)); } VLOG(0) << " DeviceCount " << count; paddle::platform::DeviceContextPool::Create(places); testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
e07a8e9333d7ec740a2871e3c5256ac94a9d5a20.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "gtest/gtest.h" #include "paddle/platform/device_context.h" #include "glog/logging.h" TEST(Device, Init) { using paddle::platform::DeviceContext; using paddle::platform::CUDADeviceContext; using paddle::platform::GPUPlace; int count = paddle::platform::GetCUDADeviceCount(); for (int i = 0; i < count; i++) { CUDADeviceContext* device_context = new CUDADeviceContext(GPUPlace(i)); Eigen::GpuDevice* gpu_device = device_context->eigen_device(); ASSERT_NE(nullptr, gpu_device); delete device_context; } } TEST(Device, CUDADeviceContext) { using paddle::platform::CUDADeviceContext; using paddle::platform::GPUPlace; int count = paddle::platform::GetCUDADeviceCount(); for (int i = 0; i < count; i++) { CUDADeviceContext* device_context = new CUDADeviceContext(GPUPlace(i)); Eigen::GpuDevice* gpu_device = device_context->eigen_device(); ASSERT_NE(nullptr, gpu_device); cudnnHandle_t cudnn_handle = device_context->cudnn_handle(); ASSERT_NE(nullptr, cudnn_handle); cublasHandle_t cublas_handle = device_context->cublas_handle(); ASSERT_NE(nullptr, cublas_handle); ASSERT_NE(nullptr, device_context->stream()); delete device_context; } } TEST(Device, CUDNNDeviceContext) { using paddle::platform::CUDNNDeviceContext; using paddle::platform::CUDNNPlace; if (paddle::platform::dynload::HasCUDNN()) { int count = paddle::platform::GetCUDADeviceCount(); for (int i = 0; i < count; ++i) { CUDNNDeviceContext* device_context = new CUDNNDeviceContext(CUDNNPlace(i)); cudnnHandle_t cudnn_handle = device_context->cudnn_handle(); ASSERT_NE(nullptr, cudnn_handle); ASSERT_NE(nullptr, device_context->stream()); delete device_context; } } } TEST(Device, DeviceContextPool) { using paddle::platform::DeviceContextPool; using paddle::platform::CUDADeviceContext; using paddle::platform::Place; using paddle::platform::CPUPlace; using paddle::platform::GPUPlace; DeviceContextPool& pool = DeviceContextPool::Get(); auto cpu_dev_ctx1 = pool.Borrow(CPUPlace()); auto cpu_dev_ctx2 = pool.Borrow(CPUPlace()); EXPECT_TRUE(cpu_dev_ctx2 == cpu_dev_ctx1); std::vector<Place> gpu_places; int count = paddle::platform::GetCUDADeviceCount(); for (int i = 0; i < count; ++i) { gpu_places.emplace_back(GPUPlace(i)); } auto dev_ctxs = pool.Borrow(gpu_places); for (size_t i = 0; i < dev_ctxs.size(); ++i) { auto* dev_ctx = static_cast<const CUDADeviceContext*>(dev_ctxs[i]); // check same as GPUPlace(i) GPUPlace place = boost::get<GPUPlace>(dev_ctx->GetPlace()); EXPECT_EQ(place.GetDeviceId(), static_cast<int>(i)); } } int main(int argc, char** argv) { int dev_count = paddle::platform::GetCUDADeviceCount(); if (dev_count <= 1) { LOG(WARNING) << "Cannot test multi-gpu DeviceContextPool, because the CUDA " "device count is " << dev_count; return 0; } std::vector<paddle::platform::Place> places; places.emplace_back(paddle::platform::CPUPlace()); int count = paddle::platform::GetCUDADeviceCount(); for (int i = 0; i < count; ++i) { places.emplace_back(paddle::platform::GPUPlace(i)); } 
VLOG(0) << " DeviceCount " << count; paddle::platform::DeviceContextPool::Create(places); testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
51079ef2005a27c9e943b217de1ae43bb211c8b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_multidim_copy_kernel; int xdim0_multidim_copy_kernel_h = -1; __constant__ int ydim0_multidim_copy_kernel; int ydim0_multidim_copy_kernel_h = -1; __constant__ int xdim1_multidim_copy_kernel; int xdim1_multidim_copy_kernel_h = -1; __constant__ int ydim1_multidim_copy_kernel; int ydim1_multidim_copy_kernel_h = -1; #undef OPS_ACC_MD0 #undef OPS_ACC_MD1 #define OPS_ACC_MD0(d, x, y) \ ((x) + (xdim0_multidim_copy_kernel * (y)) + \ (d)*xdim0_multidim_copy_kernel * ydim0_multidim_copy_kernel) #define OPS_ACC_MD1(d, x, y) \ ((x) + (xdim1_multidim_copy_kernel * (y)) + \ (d)*xdim1_multidim_copy_kernel * ydim1_multidim_copy_kernel) // user function __device__ void multidim_copy_kernel_gpu(const double *src, double *dest) { dest[OPS_ACC_MD1(0, 0, 0)] = src[OPS_ACC_MD0(0, 0, 0)]; dest[OPS_ACC_MD1(1, 0, 0)] = src[OPS_ACC_MD0(1, 0, 0)]; } #undef OPS_ACC_MD0 #undef OPS_ACC_MD1 __global__ void ops_multidim_copy_kernel(const double *__restrict arg0, double *__restrict arg1, int size0, int size1) { int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_multidim_copy_kernel; arg1 += idx_x * 1 + idx_y * 1 * xdim1_multidim_copy_kernel; if (idx_x < size0 && idx_y < size1) { multidim_copy_kernel_gpu(arg0, arg1); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_multidim_copy_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1) { #else void ops_par_loop_multidim_copy_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; #endif // Timing double t1, t2, c1, c2; ops_arg args[2] = {arg0, arg1}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 2, range, 1)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(1, "multidim_copy_kernel"); OPS_kernels[1].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 2; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 2; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_multidim_copy_kernel_h || ydim0 != ydim0_multidim_copy_kernel_h || xdim1 != xdim1_multidim_copy_kernel_h || ydim1 != ydim1_multidim_copy_kernel_h) { hipMemcpyToSymbol(xdim0_multidim_copy_kernel, &xdim0, sizeof(int)); xdim0_multidim_copy_kernel_h = xdim0; hipMemcpyToSymbol(ydim0_multidim_copy_kernel, &ydim0, sizeof(int)); ydim0_multidim_copy_kernel_h = ydim0; 
hipMemcpyToSymbol(xdim1_multidim_copy_kernel, &xdim1, sizeof(int)); xdim1_multidim_copy_kernel_h = xdim1; hipMemcpyToSymbol(ydim1_multidim_copy_kernel, &ydim1, sizeof(int)); ydim1_multidim_copy_kernel_h = ydim1; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[2]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args, 2, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[1].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_multidim_copy_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], x_size, y_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[1].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[1].mpi_time += t2 - t1; OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_multidim_copy_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 1; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 1; for (int i = 0; i < 4; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 2; desc->args = (ops_arg *)malloc(2 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->function = ops_par_loop_multidim_copy_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(1, "multidim_copy_kernel"); } ops_enqueue_kernel(desc); } #endif
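The OPS_ACC_MDn macros above flatten a (component, x, y) triple into a single offset of a component-major block: each of the dat's components occupies a full xdim*ydim plane. The equivalent host-side arithmetic, written out as a hypothetical helper, is:

#include <cstddef>

// CPU sketch of the OPS_ACC_MDn indexing used by multidim_copy_kernel (illustrative only).
inline size_t MultiDimIndex(int d, int x, int y, int xdim, int ydim) {
  return (size_t)x + (size_t)xdim * y + (size_t)d * xdim * ydim;
}

// The device function then copies both components of one grid point, i.e. conceptually:
//   dest[MultiDimIndex(0, x, y, xdim1, ydim1)] = src[MultiDimIndex(0, x, y, xdim0, ydim0)];
//   dest[MultiDimIndex(1, x, y, xdim1, ydim1)] = src[MultiDimIndex(1, x, y, xdim0, ydim0)];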
51079ef2005a27c9e943b217de1ae43bb211c8b7.cu
// // auto-generated by ops.py // __constant__ int xdim0_multidim_copy_kernel; int xdim0_multidim_copy_kernel_h = -1; __constant__ int ydim0_multidim_copy_kernel; int ydim0_multidim_copy_kernel_h = -1; __constant__ int xdim1_multidim_copy_kernel; int xdim1_multidim_copy_kernel_h = -1; __constant__ int ydim1_multidim_copy_kernel; int ydim1_multidim_copy_kernel_h = -1; #undef OPS_ACC_MD0 #undef OPS_ACC_MD1 #define OPS_ACC_MD0(d, x, y) \ ((x) + (xdim0_multidim_copy_kernel * (y)) + \ (d)*xdim0_multidim_copy_kernel * ydim0_multidim_copy_kernel) #define OPS_ACC_MD1(d, x, y) \ ((x) + (xdim1_multidim_copy_kernel * (y)) + \ (d)*xdim1_multidim_copy_kernel * ydim1_multidim_copy_kernel) // user function __device__ void multidim_copy_kernel_gpu(const double *src, double *dest) { dest[OPS_ACC_MD1(0, 0, 0)] = src[OPS_ACC_MD0(0, 0, 0)]; dest[OPS_ACC_MD1(1, 0, 0)] = src[OPS_ACC_MD0(1, 0, 0)]; } #undef OPS_ACC_MD0 #undef OPS_ACC_MD1 __global__ void ops_multidim_copy_kernel(const double *__restrict arg0, double *__restrict arg1, int size0, int size1) { int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_multidim_copy_kernel; arg1 += idx_x * 1 + idx_y * 1 * xdim1_multidim_copy_kernel; if (idx_x < size0 && idx_y < size1) { multidim_copy_kernel_gpu(arg0, arg1); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_multidim_copy_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1) { #else void ops_par_loop_multidim_copy_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; #endif // Timing double t1, t2, c1, c2; ops_arg args[2] = {arg0, arg1}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 2, range, 1)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(1, "multidim_copy_kernel"); OPS_kernels[1].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 2; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 2; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_multidim_copy_kernel_h || ydim0 != ydim0_multidim_copy_kernel_h || xdim1 != xdim1_multidim_copy_kernel_h || ydim1 != ydim1_multidim_copy_kernel_h) { cudaMemcpyToSymbol(xdim0_multidim_copy_kernel, &xdim0, sizeof(int)); xdim0_multidim_copy_kernel_h = xdim0; cudaMemcpyToSymbol(ydim0_multidim_copy_kernel, &ydim0, sizeof(int)); ydim0_multidim_copy_kernel_h = ydim0; cudaMemcpyToSymbol(xdim1_multidim_copy_kernel, &xdim1, sizeof(int)); xdim1_multidim_copy_kernel_h = 
xdim1; cudaMemcpyToSymbol(ydim1_multidim_copy_kernel, &ydim1, sizeof(int)); ydim1_multidim_copy_kernel_h = ydim1; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[2]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args, 2, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[1].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_multidim_copy_kernel<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1], x_size, y_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[1].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[1].mpi_time += t2 - t1; OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_multidim_copy_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 1; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 1; for (int i = 0; i < 4; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 2; desc->args = (ops_arg *)malloc(2 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->function = ops_par_loop_multidim_copy_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(1, "multidim_copy_kernel"); } ops_enqueue_kernel(desc); } #endif
64dd09fea964b0e95d15d8756c4d58656e3467d3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "SimpleMOC-kernel_header.h" __global__ void setup_kernel(hiprandState_t *state, Input I) { int threadId = blockIdx.x *blockDim.x + threadIdx.x; if( threadId >= I.streams) return; hiprand_init(1234, threadId, 0, &state[threadId]); } // Initialize global flux states to random numbers on device // Slow, poor use of GPU, but fine since it's just initialization code __global__ void init_flux_states( float * flux_states, int N_flux_states, Input I, hiprandState_t * state) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; // geometric segment //int threadId = blockId * blockDim.x + threadIdx.x; // energy group if(blockId >= N_flux_states) return; // Assign RNG state hiprandState_t * localState = &state[blockId % I.streams]; if( threadIdx.x == 0 ) for( int i = 0; i < I.egroups; i++ ) flux_states[blockId +i] = hiprand_uniform(localState); } // Gets I from user and sets defaults Input set_default_input( void ) { Input I; I.source_2D_regions = 5000; I.coarse_axial_intervals = 27; I.fine_axial_intervals = 5; I.decomp_assemblies_ax = 20; // Number of subdomains per assembly axially I.segments = 50000000; I.egroups = 128; I.streams = 10000; I.seg_per_thread = 100; return I; } // Returns a memory esimate (in MB) for the program's primary data structures double mem_estimate( Input I ) { size_t nbytes = 0; // Sources Array nbytes += I.source_3D_regions * sizeof(Source); // Fine Source Data long N_fine = I.source_3D_regions * I.fine_axial_intervals * I.egroups; nbytes += N_fine * sizeof(float); // Fine Flux Data nbytes += N_fine * sizeof(float); // SigT Data long N_sigT = I.source_3D_regions * I.egroups; nbytes += N_sigT * sizeof(float); // Return MB return (double) nbytes / 1024.0 / 1024.0; } Source * initialize_sources( Input I, Source_Arrays * SA ) { // Source Data Structure Allocation Source * sources = (Source *) malloc( I.source_3D_regions * sizeof(Source)); // Allocate Fine Source Data long N_fine = I.source_3D_regions * I.fine_axial_intervals * I.egroups; SA->fine_source_arr = (float *) malloc( N_fine * sizeof(float)); for( int i = 0; i < I.source_3D_regions; i++ ) sources[i].fine_source_id = i*I.fine_axial_intervals*I.egroups; // Allocate Fine Flux Data SA->fine_flux_arr = (float *) malloc( N_fine * sizeof(float)); for( int i = 0; i < I.source_3D_regions; i++ ) sources[i].fine_flux_id = i*I.fine_axial_intervals*I.egroups; // Allocate SigT Data long N_sigT = I.source_3D_regions * I.egroups; SA->sigT_arr = (float *) malloc( N_sigT * sizeof(float)); for( int i = 0; i < I.source_3D_regions; i++ ) sources[i].sigT_id = i * I.egroups; // Initialize fine source and flux to random numbers for( long i = 0; i < N_fine; i++ ) { SA->fine_source_arr[i] = (float) rand() / RAND_MAX; SA->fine_flux_arr[i] = (float) rand() / RAND_MAX; } // Initialize SigT Values for( int i = 0; i < N_sigT; i++ ) SA->sigT_arr[i] = (float) rand() / RAND_MAX; return sources; } Source * initialize_device_sources( Input I, Source_Arrays * SA_h, Source_Arrays * SA_d, Source * sources_h ) { // Allocate & Copy Fine Source Data long N_fine = I.source_3D_regions * I.fine_axial_intervals * I.egroups; hipMalloc((void **) &SA_d->fine_source_arr, N_fine * sizeof(float)); hipMemcpy(SA_d->fine_source_arr, SA_h->fine_source_arr, N_fine * sizeof(float), hipMemcpyHostToDevice); // Allocate & Copy Fine Flux Data hipMalloc((void **) &SA_d->fine_flux_arr, N_fine * sizeof(float)); hipMemcpy(SA_d->fine_flux_arr, SA_h->fine_flux_arr, N_fine * sizeof(float), 
hipMemcpyHostToDevice); // Allocate & Copy SigT Data long N_sigT = I.source_3D_regions * I.egroups; hipMalloc((void **) &SA_d->sigT_arr, N_sigT * sizeof(float)); hipMemcpy(SA_d->sigT_arr, SA_h->sigT_arr, N_sigT * sizeof(float), hipMemcpyHostToDevice); // Allocate & Copy Source Array Data Source * sources_d; hipMalloc((void **) &sources_d, I.source_3D_regions * sizeof(Source)); hipMemcpy(sources_d, sources_h, I.source_3D_regions * sizeof(Source), hipMemcpyHostToDevice); return sources_d; } // Builds a table of exponential values for linear interpolation Table buildExponentialTable( void ) { // define table Table table; //float precision = 0.01; float maxVal = 10.0; // compute number of arry values //int N = (int) ( maxVal * sqrt(1.0 / ( 8.0 * precision * 0.01 ) ) ); int N = 353; // compute spacing float dx = maxVal / (float) N; // store linear segment information (slope and y-intercept) for( int n = 0; n < N; n++ ) { // compute slope and y-intercept for ( 1 - exp(-x) ) float exponential = exp( - n * dx ); table.values[ 2*n ] = - exponential; table.values[ 2*n + 1 ] = 1 + ( n * dx - 1 ) * exponential; } // assign data to table table.dx = dx; table.maxVal = maxVal - table.dx; table.N = N; return table; } void __cudaCheckError( const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK hipError_t err = hipGetLastError(); if ( hipSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } // More careful checking. However, this will affect performance. // Comment away if needed. err = hipDeviceSynchronize(); if( hipSuccess != err ) { fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } #endif return; }
64dd09fea964b0e95d15d8756c4d58656e3467d3.cu
#include "SimpleMOC-kernel_header.h" __global__ void setup_kernel(curandState *state, Input I) { int threadId = blockIdx.x *blockDim.x + threadIdx.x; if( threadId >= I.streams) return; curand_init(1234, threadId, 0, &state[threadId]); } // Initialize global flux states to random numbers on device // Slow, poor use of GPU, but fine since it's just initialization code __global__ void init_flux_states( float * flux_states, int N_flux_states, Input I, curandState * state) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; // geometric segment //int threadId = blockId * blockDim.x + threadIdx.x; // energy group if(blockId >= N_flux_states) return; // Assign RNG state curandState * localState = &state[blockId % I.streams]; if( threadIdx.x == 0 ) for( int i = 0; i < I.egroups; i++ ) flux_states[blockId +i] = curand_uniform(localState); } // Gets I from user and sets defaults Input set_default_input( void ) { Input I; I.source_2D_regions = 5000; I.coarse_axial_intervals = 27; I.fine_axial_intervals = 5; I.decomp_assemblies_ax = 20; // Number of subdomains per assembly axially I.segments = 50000000; I.egroups = 128; I.streams = 10000; I.seg_per_thread = 100; return I; } // Returns a memory esimate (in MB) for the program's primary data structures double mem_estimate( Input I ) { size_t nbytes = 0; // Sources Array nbytes += I.source_3D_regions * sizeof(Source); // Fine Source Data long N_fine = I.source_3D_regions * I.fine_axial_intervals * I.egroups; nbytes += N_fine * sizeof(float); // Fine Flux Data nbytes += N_fine * sizeof(float); // SigT Data long N_sigT = I.source_3D_regions * I.egroups; nbytes += N_sigT * sizeof(float); // Return MB return (double) nbytes / 1024.0 / 1024.0; } Source * initialize_sources( Input I, Source_Arrays * SA ) { // Source Data Structure Allocation Source * sources = (Source *) malloc( I.source_3D_regions * sizeof(Source)); // Allocate Fine Source Data long N_fine = I.source_3D_regions * I.fine_axial_intervals * I.egroups; SA->fine_source_arr = (float *) malloc( N_fine * sizeof(float)); for( int i = 0; i < I.source_3D_regions; i++ ) sources[i].fine_source_id = i*I.fine_axial_intervals*I.egroups; // Allocate Fine Flux Data SA->fine_flux_arr = (float *) malloc( N_fine * sizeof(float)); for( int i = 0; i < I.source_3D_regions; i++ ) sources[i].fine_flux_id = i*I.fine_axial_intervals*I.egroups; // Allocate SigT Data long N_sigT = I.source_3D_regions * I.egroups; SA->sigT_arr = (float *) malloc( N_sigT * sizeof(float)); for( int i = 0; i < I.source_3D_regions; i++ ) sources[i].sigT_id = i * I.egroups; // Initialize fine source and flux to random numbers for( long i = 0; i < N_fine; i++ ) { SA->fine_source_arr[i] = (float) rand() / RAND_MAX; SA->fine_flux_arr[i] = (float) rand() / RAND_MAX; } // Initialize SigT Values for( int i = 0; i < N_sigT; i++ ) SA->sigT_arr[i] = (float) rand() / RAND_MAX; return sources; } Source * initialize_device_sources( Input I, Source_Arrays * SA_h, Source_Arrays * SA_d, Source * sources_h ) { // Allocate & Copy Fine Source Data long N_fine = I.source_3D_regions * I.fine_axial_intervals * I.egroups; cudaMalloc((void **) &SA_d->fine_source_arr, N_fine * sizeof(float)); cudaMemcpy(SA_d->fine_source_arr, SA_h->fine_source_arr, N_fine * sizeof(float), cudaMemcpyHostToDevice); // Allocate & Copy Fine Flux Data cudaMalloc((void **) &SA_d->fine_flux_arr, N_fine * sizeof(float)); cudaMemcpy(SA_d->fine_flux_arr, SA_h->fine_flux_arr, N_fine * sizeof(float), cudaMemcpyHostToDevice); // Allocate & Copy SigT Data long N_sigT = I.source_3D_regions * 
I.egroups; cudaMalloc((void **) &SA_d->sigT_arr, N_sigT * sizeof(float)); cudaMemcpy(SA_d->sigT_arr, SA_h->sigT_arr, N_sigT * sizeof(float), cudaMemcpyHostToDevice); // Allocate & Copy Source Array Data Source * sources_d; cudaMalloc((void **) &sources_d, I.source_3D_regions * sizeof(Source)); cudaMemcpy(sources_d, sources_h, I.source_3D_regions * sizeof(Source), cudaMemcpyHostToDevice); return sources_d; } // Builds a table of exponential values for linear interpolation Table buildExponentialTable( void ) { // define table Table table; //float precision = 0.01; float maxVal = 10.0; // compute number of arry values //int N = (int) ( maxVal * sqrt(1.0 / ( 8.0 * precision * 0.01 ) ) ); int N = 353; // compute spacing float dx = maxVal / (float) N; // store linear segment information (slope and y-intercept) for( int n = 0; n < N; n++ ) { // compute slope and y-intercept for ( 1 - exp(-x) ) float exponential = exp( - n * dx ); table.values[ 2*n ] = - exponential; table.values[ 2*n + 1 ] = 1 + ( n * dx - 1 ) * exponential; } // assign data to table table.dx = dx; table.maxVal = maxVal - table.dx; table.N = N; return table; } void __cudaCheckError( const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK cudaError err = cudaGetLastError(); if ( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } // More careful checking. However, this will affect performance. // Comment away if needed. err = cudaDeviceSynchronize(); if( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } #endif return; }
0b7b84f65a50a3e720ceccd35d088a4fbdfeacd8.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_sin.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int n = XSIZE*YSIZE;
            double *result = NULL;
            hipMalloc(&result, XSIZE*YSIZE);
            double *x = NULL;
            hipMalloc(&x, XSIZE*YSIZE);
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( vec_sin), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( vec_sin), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( vec_sin), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
0b7b84f65a50a3e720ceccd35d088a4fbdfeacd8.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_sin.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int n = XSIZE*YSIZE;
            double *result = NULL;
            cudaMalloc(&result, XSIZE*YSIZE);
            double *x = NULL;
            cudaMalloc(&x, XSIZE*YSIZE);
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            vec_sin<<<gridBlock,threadBlock>>>(n,result,x);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                vec_sin<<<gridBlock,threadBlock>>>(n,result,x);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                vec_sin<<<gridBlock,threadBlock>>>(n,result,x);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
b23549d67129016d131381d3045180cfa6616c94.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "WarpSelectImpl_hip.cuh"

namespace faiss {
namespace gpu {

WARP_SELECT_IMPL(float, true, 512, 8);

}
} // namespace faiss
b23549d67129016d131381d3045180cfa6616c94.cu
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "WarpSelectImpl.cuh"

namespace faiss {
namespace gpu {

WARP_SELECT_IMPL(float, true, 512, 8);

}
} // namespace faiss
88f9942e725d482f2aaf3b2f17a935b11d975db7.hip
// !!! This is a file automatically generated by hipify!!! // Copyright 2019 ETH Zürich, Thomas Schöps // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #include "camera_calibration/bundle_adjustment/cuda_joint_optimization.cuh" #include <hipcub/hipcub.hpp> #include <hip/hip_runtime.h> #include <libvis/cuda/cuda_auto_tuner.h> #include <libvis/cuda/cuda_util.h> #include <math_constants.h> #include "camera_calibration/bundle_adjustment/joint_optimization_jacobians.h" #include "camera_calibration/cuda/cuda_matrix.cuh" #include "camera_calibration/cuda/cuda_util.cuh" #include "camera_calibration/models/cuda_central_generic_model.cuh" namespace vis { /* * Schema for accumulator classes: * * struct Accumulator { * /// Called if the residuals belonging to this thread are invalid. * /// This is only called once and before any SetJacobianComponent() call, * /// except for calls to SetJacobianComponent_AllThreadsSameIndex() with valid == false. * __forceinline__ __device__ void SetResidualsInvalid(PCGScalar* features_residual_x, u32 feature_index); * * /// Called if the residuals belonging to this thread are valid. * /// This is only called once and before any SetJacobianComponent() call, * /// except for calls to SetJacobianComponent_AllThreadsSameIndex() with valid == false. * __forceinline__ __device__ void SetResiduals(PCGScalar residual_x, PCGScalar residual_y, PCGScalar* features_residual_x, PCGScalar* features_residual_y, u32 feature_index); * * /// Sets the values of one column in the [2 x N] Jacobian of the pixel position * /// wrt. the unknowns. I.e., value_x corresponds to the x-residual (row 0) and value_y * /// to the y-residual (row 1). * /// * /// This version is called if there are no possible conflicts between * /// different threads in the kernel call, i.e., for a given thread, no other * /// thread in the thread grid possibly writes to the same index. * /// * /// In case the residuals are invalid (SetResidualsInvalid() has been called * /// before), this function must not be called.
* __forceinline__ __device__ void SetJacobianComponent_ThreadConflictFree(u32 index, PCGScalar jac_x, PCGScalar jac_y); * * /// See SetJacobianComponent_ThreadConflictFree(). This version is used * /// if all threads in the thread block write to the same index. * /// * /// This variant of SetJacobianComponent() is called both if the residuals * /// are valid and if they are invalid to enable block-wide operations. * /// * /// NOTE: If the temp_storage is used before, a __syncthreads() has to be done. * __forceinline__ __device__ void SetJacobianComponent_AllThreadsSameIndex(u32 index, PCGScalar jac_x, PCGScalar jac_y, bool valid); * * /// See SetJacobianComponent_ThreadConflictFree(). This version is used * /// if none of the other two versions applies. * /// * /// In case the residuals are invalid (SetResidualsInvalid() has been called * /// before), this function must not be called. * __forceinline__ __device__ void SetJacobianComponent_RandomThreadConflicts(u32 index, PCGScalar jac_x, PCGScalar jac_y); * }; */ constexpr PCGScalar kHuberWeight = 1.0; // TODO: Make parameter constexpr int kResidualJacobianBlockSize = 256; template<int block_width, int block_height, bool compute_jacobians, bool are_camera_tr_rig_in_state, class Model, class Accumulator> __device__ void ComputeResidualAndJacobian( bool valid, u32 feature_index, CUDADatasetAndState& s, Model& model, Accumulator& accumulator) { u16 point_index = s.features_index[feature_index]; float3 point = make_float3(s.points[0][point_index], s.points[1][point_index], s.points[2][point_index]); u16 image_index = s.features_image[feature_index]; float3 local_point = s.image_tr_global[image_index] * point; float2 pixel = make_float2(0.5f * (model.calibration_min_x() + model.calibration_max_x() + 1), 0.5f * (model.calibration_min_y() + model.calibration_max_y() + 1)); if (!model.ProjectWithInitialEstimate(local_point, &pixel)) { if (valid) { accumulator.SetResidualsInvalid(s.features_residual_x, feature_index); } valid = false; } if (valid) { accumulator.SetResiduals( pixel.x - s.features_x[feature_index], pixel.y - s.features_y[feature_index], s.features_residual_x, s.features_residual_y, feature_index); } if (compute_jacobians) { // Compute Jacobian wrt. image pose, optionally camera_tr_rig, and point position [2 x (6 + (rig ? 6 : 0) + 3)]. // Residual: Project(exp(delta) * image_tr_pattern * pattern_point) - measurement // Compute Jacobian as follows: // (d pixel) / (d local_point) [2 x 3], numerical // * (d local_point) / (d pose and global_point) [3 x (7 + (rig ? 7 : 0) + 3)], analytical // Numerical part: CUDAMatrix<PCGScalar, 2, 3> pixel_wrt_local_point; const PCGScalar kDelta = s.numerical_diff_delta * (model.is_central_camera_model() ? 
Norm(local_point) : 0.1); #pragma unroll for (int dimension = 0; dimension < 3; ++ dimension) { float3 offset_point = local_point; *(&offset_point.x + dimension) += kDelta; float2 offset_pixel = pixel; if (!model.ProjectWithInitialEstimate(offset_point, &offset_pixel)) { valid = false; break; } pixel_wrt_local_point(0, dimension) = (offset_pixel.x - pixel.x) / kDelta; pixel_wrt_local_point(1, dimension) = (offset_pixel.y - pixel.y) / kDelta; } // Analytical part: CUDAMatrix<PCGScalar, 3, 7 + 7 + 3> local_point_wrt_poses_and_global_point; if (are_camera_tr_rig_in_state) { ComputeRigJacobian( s.camera_q_rig[4 * s.camera_index + 0], s.camera_q_rig[4 * s.camera_index + 1], s.camera_q_rig[4 * s.camera_index + 2], s.camera_q_rig[4 * s.camera_index + 3], point.x, point.y, point.z, s.rig_tr_global[7 * image_index + 0], s.rig_tr_global[7 * image_index + 1], s.rig_tr_global[7 * image_index + 2], s.rig_tr_global[7 * image_index + 3], s.rig_tr_global[7 * image_index + 4], s.rig_tr_global[7 * image_index + 5], s.rig_tr_global[7 * image_index + 6], local_point_wrt_poses_and_global_point.row(0), local_point_wrt_poses_and_global_point.row(1), local_point_wrt_poses_and_global_point.row(2)); } else { // NOTE: The first row expects image_q_global values. Thus, here we assume // that rig_q_global == image_q_global, i.e., the camera_q_rig // transformation is identity. ComputeJacobian( s.rig_tr_global[7 * image_index + 0], s.rig_tr_global[7 * image_index + 1], s.rig_tr_global[7 * image_index + 2], s.rig_tr_global[7 * image_index + 3], point.x, point.y, point.z, local_point_wrt_poses_and_global_point.row(0), local_point_wrt_poses_and_global_point.row(1), local_point_wrt_poses_and_global_point.row(2)); } CUDAMatrix<PCGScalar, 2, 6> pose_jacobian; CUDAMatrix<PCGScalar, 2, 6> rig_jacobian; CUDAMatrix<PCGScalar, 2, 3> point_jacobian; if (are_camera_tr_rig_in_state) { // local_point_wrt_poses_and_global_point contains the Jacobian wrt.: // - rig_tr_global (indices 0 .. 6) // - camera_tr_rig (indices 7 .. 13) // - global_point (indices 14 .. 16) CUDAMatrix<PCGScalar, 4, 3> camera_q_rig_wrt_update; QuaternionJacobianWrtLocalUpdate(s.camera_q_rig[4 * s.camera_index + 0], s.camera_q_rig[4 * s.camera_index + 1], s.camera_q_rig[4 * s.camera_index + 2], s.camera_q_rig[4 * s.camera_index + 3], &camera_q_rig_wrt_update); CUDAMatrix<PCGScalar, 4, 3> rig_q_global_wrt_update; QuaternionJacobianWrtLocalUpdate(s.rig_tr_global[7 * image_index + 0], s.rig_tr_global[7 * image_index + 1], s.rig_tr_global[7 * image_index + 2], s.rig_tr_global[7 * image_index + 3], &rig_q_global_wrt_update); CUDAMatrix<PCGScalar, 2, 4> temp; MatrixMultiply(temp, pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<4>(0)); MatrixMultiply(pose_jacobian.cols<3>(0), temp, rig_q_global_wrt_update); MatrixMultiply(pose_jacobian.cols<3>(3), pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<3>(0 + 4)); MatrixMultiply(temp, pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<4>(7)); MatrixMultiply(rig_jacobian.cols<3>(0), temp, camera_q_rig_wrt_update); MatrixMultiply(rig_jacobian.cols<3>(3), pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<3>(7 + 4)); MatrixMultiply(point_jacobian, pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<3>(14)); } else { // local_point_wrt_poses_and_global_point contains the Jacobian wrt.: // - rig_tr_global (indices 0 .. 6) // - global_point (indices 7 .. 
9) // NOTE: Here, we assume that rig_q_global == image_q_global, i.e., the // camera_q_rig transformation is identity. CUDAMatrix<PCGScalar, 4, 3> quaternion_wrt_update; // derived in derive_jacobians.py QuaternionJacobianWrtLocalUpdate(s.rig_tr_global[7 * image_index + 0], s.rig_tr_global[7 * image_index + 1], s.rig_tr_global[7 * image_index + 2], s.rig_tr_global[7 * image_index + 3], &quaternion_wrt_update); CUDAMatrix<PCGScalar, 2, 4> temp; MatrixMultiply(temp, pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<4>(0)); MatrixMultiply(pose_jacobian.cols<3>(0), temp, quaternion_wrt_update); MatrixMultiply(pose_jacobian.cols<3>(3), pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<3>(4)); MatrixMultiply(point_jacobian, pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<3>(7)); } // Get the model Jacobian constexpr int num_intrinsic_variables = Model::IntrinsicsJacobianSize; CUDAMatrix<u32, num_intrinsic_variables, 1> grid_update_indices; CUDAMatrix<PCGScalar, 2, num_intrinsic_variables> intrinsic_jac; if (!model.ProjectionJacobianWrtIntrinsics( local_point, pixel, s.numerical_diff_delta, grid_update_indices.data(), intrinsic_jac.row(0), intrinsic_jac.row(1))) { valid = false; } // Accumulate Jacobians: if (are_camera_tr_rig_in_state) { if (valid) { for (int i = 0; valid && i < 6; ++ i) { accumulator.SetJacobianComponent_RandomThreadConflicts( s.rig_tr_global_start_index + 6 * image_index + i, pose_jacobian(0, i), pose_jacobian(1, i)); } } for (int i = 0; i < 6; ++ i) { accumulator.SetJacobianComponent_AllThreadsSameIndex( s.camera_tr_rig_start_index + s.camera_index * 6 + i, rig_jacobian(0, i), rig_jacobian(1, i), valid); } if (valid) { for (int i = 0; i < 3; ++ i) { accumulator.SetJacobianComponent_RandomThreadConflicts( s.points_start_index + point_index * 3 + i, point_jacobian(0, i), point_jacobian(1, i)); } } } else { if (valid) { for (int i = 0; i < 6; ++ i) { accumulator.SetJacobianComponent_RandomThreadConflicts( s.rig_tr_global_start_index + 6 * image_index + i, pose_jacobian(0, i), pose_jacobian(1, i)); } for (int i = 0; i < 3; ++ i) { accumulator.SetJacobianComponent_RandomThreadConflicts( s.points_start_index + point_index * 3 + i, point_jacobian(0, i), point_jacobian(1, i)); } } } if (valid) { for (int i = 0; i < num_intrinsic_variables; ++ i) { accumulator.SetJacobianComponent_RandomThreadConflicts( s.intrinsic_start_index + grid_update_indices(i), intrinsic_jac(0, i), intrinsic_jac(1, i)); } } } } template <int block_width, int block_height> struct PCGCompareCostAccumulator { __forceinline__ __device__ PCGCompareCostAccumulator( typename hipcub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage) : temp_storage_(temp_storage) {} __forceinline__ __device__ void SetResidualsInvalid(PCGScalar* features_residual_x, u32 feature_index) { // No need to do anything, as these residuals do not matter for the comparison } __forceinline__ __device__ void SetResiduals(PCGScalar residual_x, PCGScalar residual_y, PCGScalar* features_residual_x, PCGScalar* features_residual_y, u32 feature_index) { PCGScalar this_cost = ComputeHuberCost(residual_x, residual_y, kHuberWeight); cost_ += this_cost; if (::isnan(features_residual_x[feature_index])) { // These residuals were invalid for the other cost, so ignore them for the comparison return; } // Both in the old and the new state, the residuals are valid. Compare them. 
PCGScalar other_cost = ComputeHuberCost(features_residual_x[feature_index], features_residual_y[feature_index], kHuberWeight); relative_cost_ += this_cost - other_cost; } __forceinline__ __device__ void SetJacobianComponent_ThreadConflictFree(u32 /*index*/, PCGScalar /*jac_x*/, PCGScalar /*jac_y*/) {} __forceinline__ __device__ void SetJacobianComponent_AllThreadsSameIndex(u32 /*index*/, PCGScalar /*jac_x*/, PCGScalar /*jac_y*/, bool /*valid*/) {} __forceinline__ __device__ void SetJacobianComponent_RandomThreadConflicts(u32 /*index*/, PCGScalar /*jac_x*/, PCGScalar /*jac_y*/) {} PCGScalar cost_ = 0; PCGScalar relative_cost_ = 0; typename hipcub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage_; }; template<int block_width, class Model, bool are_camera_tr_rig_in_state> __global__ void __launch_bounds__(/*maxThreadsPerBlock*/ kResidualJacobianBlockSize, /*minBlocksPerMultiprocessor*/ 1) PCGCompareCostCUDAKernel( CUDADatasetAndState s, Model model, CUDABuffer_<PCGScalar> pcg_cost, CUDABuffer_<PCGScalar> pcg_relative_cost) { unsigned int feature_index = blockIdx.x * blockDim.x + threadIdx.x; bool valid = feature_index < s.num_features; if (!valid) { feature_index = 0; } constexpr int block_height = 1; typedef hipcub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; PCGCompareCostAccumulator<block_width, block_height> accumulator(&temp_storage); ComputeResidualAndJacobian<block_width, block_height, /*compute_jacobians*/ false, are_camera_tr_rig_in_state>(valid, feature_index, s, model, accumulator); __syncthreads(); BlockedAtomicSum<block_width, block_height>( &pcg_cost(0, 0), accumulator.cost_, valid, &temp_storage); __syncthreads(); BlockedAtomicSum<block_width, block_height>( &pcg_relative_cost(0, 0), accumulator.relative_cost_, valid, &temp_storage); } template <class Model> void PCGCompareCostCUDA( hipStream_t stream, const CUDADatasetAndState& s, const Model& model, CUDABuffer_<PCGScalar>* pcg_cost, CUDABuffer_<PCGScalar>* pcg_relative_cost) { CHECK_CUDA_NO_ERROR(); if (s.num_features == 0) { return; } constexpr int block_width = kResidualJacobianBlockSize; dim3 grid_dim(GetBlockCount(s.num_features, block_width)); dim3 block_dim(block_width); bool are_camera_tr_rig_in_state = s.are_camera_tr_rig_in_state; COMPILE_OPTION( are_camera_tr_rig_in_state, hipLaunchKernelGGL(( PCGCompareCostCUDAKernel<block_width, Model, _are_camera_tr_rig_in_state>) , dim3(grid_dim), dim3(block_dim), 0, stream, s, model, *pcg_cost, *pcg_relative_cost);); CHECK_CUDA_NO_ERROR(); } template void PCGCompareCostCUDA<CUDACentralGenericModel>( hipStream_t stream, const CUDADatasetAndState& s, const CUDACentralGenericModel& model, CUDABuffer_<PCGScalar>* pcg_cost, CUDABuffer_<PCGScalar>* pcg_relative_cost); template <int block_width, int block_height> struct PCGInitAccumulator { __forceinline__ __device__ PCGInitAccumulator( CUDABuffer_<PCGScalar>* pcg_r, CUDABuffer_<PCGScalar>* pcg_M, typename hipcub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage) : pcg_r_(pcg_r), pcg_M_(pcg_M), temp_storage_(temp_storage) {} __forceinline__ __device__ void SetResidualsInvalid(PCGScalar* features_residual_x, u32 feature_index) { features_residual_x[feature_index] = CUDART_NAN_F; // features_residual_y[feature_index] = CUDART_NAN_F; } __forceinline__ __device__ void 
SetResiduals(PCGScalar residual_x, PCGScalar residual_y, PCGScalar* features_residual_x, PCGScalar* features_residual_y, u32 feature_index) { features_residual_x[feature_index] = residual_x; features_residual_y[feature_index] = residual_y; // Cache residuals and weights weight_ = ComputeHuberWeight(residual_x, residual_y, kHuberWeight); weighted_residual_x_ = weight_ * residual_x; weighted_residual_y_ = weight_ * residual_y; cost_ += ComputeHuberCost(residual_x, residual_y, kHuberWeight); } __forceinline__ __device__ void SetJacobianComponent_ThreadConflictFree(u32 index, PCGScalar jac_x, PCGScalar jac_y) { (*pcg_r_)(0, index) -= jac_x * weighted_residual_x_ + jac_y * weighted_residual_y_; (*pcg_M_)(0, index) += jac_x * weight_ * jac_x + jac_y * weight_ * jac_y; } __forceinline__ __device__ void SetJacobianComponent_AllThreadsSameIndex(u32 index, PCGScalar jac_x, PCGScalar jac_y, bool valid) { BlockedAtomicSum<block_width, block_height>( &(*pcg_r_)(0, index), -1 * jac_x * weighted_residual_x_ + -1 * jac_y * weighted_residual_y_, valid, temp_storage_); __syncthreads(); BlockedAtomicSum<block_width, block_height>( &(*pcg_M_)(0, index), jac_x * weight_ * jac_x + jac_y * weight_ * jac_y, valid, temp_storage_); __syncthreads(); } __forceinline__ __device__ void SetJacobianComponent_RandomThreadConflicts(u32 index, PCGScalar jac_x, PCGScalar jac_y) { atomicAddFloatOrDouble( &(*pcg_r_)(0, index), -1 * jac_x * weighted_residual_x_ + -1 * jac_y * weighted_residual_y_); atomicAddFloatOrDouble( &(*pcg_M_)(0, index), jac_x * weight_ * jac_x + jac_y * weight_ * jac_y); } PCGScalar weight_; PCGScalar weighted_residual_x_; PCGScalar weighted_residual_y_; PCGScalar cost_ = 0; CUDABuffer_<PCGScalar>* pcg_r_; CUDABuffer_<PCGScalar>* pcg_M_; typename hipcub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage_; }; template<int block_width, class Model, bool are_camera_tr_rig_in_state> __global__ void __launch_bounds__(/*maxThreadsPerBlock*/ kResidualJacobianBlockSize, /*minBlocksPerMultiprocessor*/ 1) PCGInitCUDAKernel( CUDADatasetAndState s, Model model, CUDABuffer_<PCGScalar> pcg_r, CUDABuffer_<PCGScalar> pcg_M, CUDABuffer_<PCGScalar> pcg_cost) { unsigned int feature_index = blockIdx.x * blockDim.x + threadIdx.x; bool valid = feature_index < s.num_features; if (!valid) { feature_index = 0; } constexpr int block_height = 1; typedef hipcub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; PCGInitAccumulator<block_width, block_height> accumulator(&pcg_r, &pcg_M, &temp_storage); ComputeResidualAndJacobian<block_width, block_height, /*compute_jacobians*/ true, are_camera_tr_rig_in_state>(valid, feature_index, s, model, accumulator); __syncthreads(); BlockedAtomicSum<block_width, block_height>( &pcg_cost(0, 0), accumulator.cost_, valid, &temp_storage); } template <class Model> void PCGInitCUDA( hipStream_t stream, const CUDADatasetAndState& s, const Model& model, CUDABuffer_<PCGScalar>* pcg_r, CUDABuffer_<PCGScalar>* pcg_M, CUDABuffer_<PCGScalar>* pcg_cost) { CHECK_CUDA_NO_ERROR(); if (s.num_features == 0) { return; } constexpr int block_width = kResidualJacobianBlockSize; dim3 grid_dim(GetBlockCount(s.num_features, block_width)); dim3 block_dim(block_width); bool are_camera_tr_rig_in_state = s.are_camera_tr_rig_in_state; COMPILE_OPTION( are_camera_tr_rig_in_state, hipLaunchKernelGGL(( PCGInitCUDAKernel<block_width, 
Model, _are_camera_tr_rig_in_state>) , dim3(grid_dim), dim3(block_dim), 0, stream, s, model, *pcg_r, *pcg_M, *pcg_cost);); CHECK_CUDA_NO_ERROR(); } template void PCGInitCUDA<CUDACentralGenericModel>( hipStream_t stream, const CUDADatasetAndState& s, const CUDACentralGenericModel& model, CUDABuffer_<PCGScalar>* pcg_r, CUDABuffer_<PCGScalar>* pcg_M, CUDABuffer_<PCGScalar>* pcg_cost); template<int block_width> __global__ void PCGInit2CUDAKernel( u32 unknown_count, PCGScalar lambda, CUDABuffer_<PCGScalar> pcg_r, CUDABuffer_<PCGScalar> pcg_M, CUDABuffer_<PCGScalar> pcg_delta, CUDABuffer_<PCGScalar> pcg_g, CUDABuffer_<PCGScalar> pcg_p, CUDABuffer_<PCGScalar> pcg_alpha_n) { unsigned int unknown_index = blockIdx.x * blockDim.x + threadIdx.x; constexpr int block_height = 1; typedef hipcub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; PCGScalar alpha_term; if (unknown_index < unknown_count) { pcg_g(0, unknown_index) = 0; // p_0 = M^-1 r_0 // The addition of lambda is also handled here. PCGScalar r_value = pcg_r(0, unknown_index); PCGScalar p_value = r_value / (pcg_M(0, unknown_index) + lambda); pcg_p(0, unknown_index) = p_value; // delta_0 = 0 pcg_delta(0, unknown_index) = 0; // alpha_n_0 = r_0^T p_0 alpha_term = r_value * p_value; } BlockedAtomicSum<block_width, block_height>( &pcg_alpha_n(0, 0), alpha_term, unknown_index < unknown_count, &temp_storage); } void PCGInit2CUDA( hipStream_t stream, u32 unknown_count, PCGScalar lambda, const CUDABuffer_<PCGScalar>& pcg_r, const CUDABuffer_<PCGScalar>& pcg_M, CUDABuffer_<PCGScalar>* pcg_delta, CUDABuffer_<PCGScalar>* pcg_g, CUDABuffer_<PCGScalar>* pcg_p, CUDABuffer_<PCGScalar>* pcg_alpha_n) { CHECK_CUDA_NO_ERROR(); if (unknown_count == 0) { return; } hipMemsetAsync(pcg_alpha_n->address(), 0, 1 * sizeof(PCGScalar), stream); CUDA_AUTO_TUNE_1D_TEMPLATED( PCGInit2CUDAKernel, 1024, unknown_count, 0, stream, TEMPLATE_ARGUMENTS(block_width), /* kernel parameters */ unknown_count, lambda, pcg_r, pcg_M, *pcg_delta, *pcg_g, *pcg_p, *pcg_alpha_n); CHECK_CUDA_NO_ERROR(); } template <int block_width, int block_height> struct PCGStep1SumAccumulator { __forceinline__ __device__ PCGStep1SumAccumulator(const CUDABuffer_<PCGScalar>& pcg_p) : pcg_p_(pcg_p) {} __forceinline__ __device__ void SetResidualsInvalid(PCGScalar* features_residual_x, u32 feature_index) { // Do nothing } __forceinline__ __device__ void SetResiduals(PCGScalar residual_x, PCGScalar residual_y, PCGScalar* features_residual_x, PCGScalar* features_residual_y, u32 feature_index) { // Cache weights weight_ = ComputeHuberWeight(residual_x, residual_y, kHuberWeight); } __forceinline__ __device__ void SetJacobianComponent_ThreadConflictFree(u32 index, PCGScalar jac_x, PCGScalar jac_y) { PCGScalar p = pcg_p_(0, index); sum_x_ += jac_x * p; sum_y_ += jac_y * p; } __forceinline__ __device__ void SetJacobianComponent_AllThreadsSameIndex(u32 index, PCGScalar jac_x, PCGScalar jac_y, bool valid) { if (valid) { PCGScalar p = pcg_p_(0, index); sum_x_ += jac_x * p; sum_y_ += jac_y * p; } } __forceinline__ __device__ void SetJacobianComponent_RandomThreadConflicts(u32 index, PCGScalar jac_x, PCGScalar jac_y) { PCGScalar p = pcg_p_(0, index); sum_x_ += jac_x * p; sum_y_ += jac_y * p; } PCGScalar sum_x_ = 0; // holds the result of (J * p) for the row of the first residual. PCGScalar sum_y_ = 0; // holds the result of (J * p) for the row of the second residual. 
PCGScalar weight_ = 0; const CUDABuffer_<PCGScalar>& pcg_p_; }; template <int block_width, int block_height> struct PCGStep1ResolveAccumulator { __forceinline__ __device__ PCGStep1ResolveAccumulator( PCGScalar sum_x, PCGScalar sum_y, CUDABuffer_<PCGScalar>* pcg_g, typename hipcub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage) : sum_x_(sum_x), sum_y_(sum_y), pcg_g_(pcg_g), temp_storage_(temp_storage) {} __forceinline__ __device__ void SetResidualsInvalid(PCGScalar* features_residual_x, u32 feature_index) { // Do nothing } __forceinline__ __device__ void SetResiduals(PCGScalar residual_x, PCGScalar residual_y, PCGScalar* features_residual_x, PCGScalar* features_residual_y, u32 feature_index) { // Do nothing } __forceinline__ __device__ void SetJacobianComponent_ThreadConflictFree(u32 index, PCGScalar jac_x, PCGScalar jac_y) { (*pcg_g_)(0, index) += jac_x * sum_x_ + jac_y * sum_y_; } __forceinline__ __device__ void SetJacobianComponent_AllThreadsSameIndex(u32 index, PCGScalar jac_x, PCGScalar jac_y, bool valid) { BlockedAtomicSum<block_width, block_height>( &(*pcg_g_)(0, index), jac_x * sum_x_ + jac_y * sum_y_, valid, temp_storage_); __syncthreads(); } __forceinline__ __device__ void SetJacobianComponent_RandomThreadConflicts(u32 index, PCGScalar jac_x, PCGScalar jac_y) { atomicAddFloatOrDouble( &(*pcg_g_)(0, index), jac_x * sum_x_ + jac_y * sum_y_); } PCGScalar sum_x_; PCGScalar sum_y_; CUDABuffer_<PCGScalar>* pcg_g_; typename hipcub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage_; }; template<int block_width, class Model, bool are_camera_tr_rig_in_state> __global__ void __launch_bounds__(/*maxThreadsPerBlock*/ kResidualJacobianBlockSize, /*minBlocksPerMultiprocessor*/ 1) PCGStep1CUDAKernel( CUDADatasetAndState s, Model model, CUDABuffer_<PCGScalar> pcg_p, CUDABuffer_<PCGScalar> pcg_g, CUDABuffer_<PCGScalar> pcg_alpha_d) { unsigned int feature_index = blockIdx.x * blockDim.x + threadIdx.x; bool valid = feature_index < s.num_features; if (!valid) { feature_index = 0; } constexpr int block_height = 1; typedef hipcub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; PCGScalar weight; PCGScalar sum_x; PCGScalar sum_y; { PCGStep1SumAccumulator<block_width, block_height> accumulator(pcg_p); ComputeResidualAndJacobian<block_width, block_height, /*compute_jacobians*/ true, are_camera_tr_rig_in_state>(valid, feature_index, s, model, accumulator); weight = accumulator.weight_; sum_x = accumulator.sum_x_; sum_y = accumulator.sum_y_; } BlockedAtomicSum<block_width, block_height>( &pcg_alpha_d(0, 0), sum_x * weight * sum_x + sum_y * weight * sum_y, valid, &temp_storage); sum_x *= weight; sum_y *= weight; __syncthreads(); // TODO: Try storing sum_x and sum_y in global memory here and moving the // part below into its own kernel. It might be faster since it might be // possible to run one of the two resulting kernels with higher // parallelism than the current large kernel. 
{ PCGStep1ResolveAccumulator<block_width, block_height> accumulator(sum_x, sum_y, &pcg_g, &temp_storage); ComputeResidualAndJacobian<block_width, block_height, /*compute_jacobians*/ true, are_camera_tr_rig_in_state>(valid, feature_index, s, model, accumulator); } } template<int block_width> __global__ void AddAlphaDEpsilonTermsCUDAKernel( u32 unknown_count, PCGScalar lambda, CUDABuffer_<PCGScalar> pcg_p, CUDABuffer_<PCGScalar> pcg_alpha_d) { unsigned int unknown_index = blockIdx.x * blockDim.x + threadIdx.x; bool valid = unknown_index < unknown_count; if (!valid) { unknown_index = unknown_count - 1; } PCGScalar p_value = pcg_p(0, unknown_index); PCGScalar term = lambda * p_value * p_value; constexpr int block_height = 1; typedef hipcub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; BlockedAtomicSum<block_width, block_height>( &pcg_alpha_d(0, 0), term, valid, &temp_storage); } template <class Model> void PCGStep1CUDA( hipStream_t stream, u32 unknown_count, const CUDADatasetAndState& s, const Model& model, CUDABuffer_<PCGScalar>* pcg_p, CUDABuffer_<PCGScalar>* pcg_g, CUDABuffer_<PCGScalar>* pcg_alpha_d) { CHECK_CUDA_NO_ERROR(); if (s.num_features == 0) { return; } constexpr int block_width = kResidualJacobianBlockSize; dim3 grid_dim(GetBlockCount(s.num_features, block_width)); dim3 block_dim(block_width); bool are_camera_tr_rig_in_state = s.are_camera_tr_rig_in_state; COMPILE_OPTION( are_camera_tr_rig_in_state, hipLaunchKernelGGL(( PCGStep1CUDAKernel<block_width, Model, _are_camera_tr_rig_in_state>) , dim3(grid_dim), dim3(block_dim), 0, stream, s, model, *pcg_p, *pcg_g, *pcg_alpha_d);); CHECK_CUDA_NO_ERROR(); } template void PCGStep1CUDA<CUDACentralGenericModel>( hipStream_t stream, u32 unknown_count, const CUDADatasetAndState& s, const CUDACentralGenericModel& model, CUDABuffer_<PCGScalar>* pcg_p, CUDABuffer_<PCGScalar>* pcg_g, CUDABuffer_<PCGScalar>* pcg_alpha_d); template<int block_width> __global__ void PCGStep2CUDAKernel( u32 unknown_count, PCGScalar lambda, CUDABuffer_<PCGScalar> pcg_r, CUDABuffer_<PCGScalar> pcg_M, CUDABuffer_<PCGScalar> pcg_delta, CUDABuffer_<PCGScalar> pcg_g, CUDABuffer_<PCGScalar> pcg_p, CUDABuffer_<PCGScalar> pcg_alpha_n, CUDABuffer_<PCGScalar> pcg_alpha_d, CUDABuffer_<PCGScalar> pcg_beta_n) { unsigned int unknown_index = blockIdx.x * blockDim.x + threadIdx.x; PCGScalar beta_term; constexpr int block_height = 1; typedef hipcub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; if (unknown_index < unknown_count) { // TODO: Default to 1 or to 0 if denominator is near-zero? Stop optimization if that happens? PCGScalar alpha = (pcg_alpha_d(0, 0) >= 1e-35f) ? (pcg_alpha_n(0, 0) / pcg_alpha_d(0, 0)) : 0; PCGScalar p_value = pcg_p(0, unknown_index); pcg_delta(0, unknown_index) += alpha * p_value; PCGScalar r_value = pcg_r(0, unknown_index); r_value -= alpha * (pcg_g(0, unknown_index) + lambda * p_value); pcg_r(0, unknown_index) = r_value; // This is called z in the Opt paper, but stored in g here to save memory. 
PCGScalar z_value = r_value / (pcg_M(0, unknown_index) + lambda); pcg_g(0, unknown_index) = z_value; beta_term = z_value * r_value; } BlockedAtomicSum<block_width, block_height>( &pcg_beta_n(0, 0), beta_term, unknown_index < unknown_count, &temp_storage); } void PCGStep2CUDA( hipStream_t stream, u32 unknown_count, PCGScalar lambda, CUDABuffer_<PCGScalar>* pcg_r, const CUDABuffer_<PCGScalar>& pcg_M, CUDABuffer_<PCGScalar>* pcg_delta, CUDABuffer_<PCGScalar>* pcg_g, CUDABuffer_<PCGScalar>* pcg_p, CUDABuffer_<PCGScalar>* pcg_alpha_n, CUDABuffer_<PCGScalar>* pcg_alpha_d, CUDABuffer_<PCGScalar>* pcg_beta_n) { CHECK_CUDA_NO_ERROR(); if (unknown_count == 0) { return; } CUDA_AUTO_TUNE_1D_TEMPLATED( AddAlphaDEpsilonTermsCUDAKernel, 1024, unknown_count, 0, stream, TEMPLATE_ARGUMENTS(block_width), /* kernel parameters */ unknown_count, lambda, *pcg_p, *pcg_alpha_d); CHECK_CUDA_NO_ERROR(); hipMemsetAsync(pcg_beta_n->address(), 0, 1 * sizeof(PCGScalar), stream); CUDA_AUTO_TUNE_1D_TEMPLATED( PCGStep2CUDAKernel, 1024, unknown_count, 0, stream, TEMPLATE_ARGUMENTS(block_width), /* kernel parameters */ unknown_count, lambda, *pcg_r, pcg_M, *pcg_delta, *pcg_g, *pcg_p, *pcg_alpha_n, *pcg_alpha_d, *pcg_beta_n); CHECK_CUDA_NO_ERROR(); } template<int block_width> __global__ void PCGStep3CUDAKernel( u32 unknown_count, CUDABuffer_<PCGScalar> pcg_g, CUDABuffer_<PCGScalar> pcg_p, CUDABuffer_<PCGScalar> pcg_alpha_n, CUDABuffer_<PCGScalar> pcg_beta_n) { unsigned int unknown_index = blockIdx.x * blockDim.x + threadIdx.x; if (unknown_index < unknown_count) { // TODO: Default to 1 or to 0 if denominator is near-zero? Stop optimization if that happens? PCGScalar beta = (pcg_alpha_n(0, 0) >= 1e-35f) ? (pcg_beta_n(0, 0) / pcg_alpha_n(0, 0)) : 0; pcg_p(0, unknown_index) = pcg_g/*z*/(0, unknown_index) + beta * pcg_p(0, unknown_index); } } void PCGStep3CUDA( hipStream_t stream, u32 unknown_count, CUDABuffer_<PCGScalar>* pcg_g, CUDABuffer_<PCGScalar>* pcg_p, CUDABuffer_<PCGScalar>* pcg_alpha_n, CUDABuffer_<PCGScalar>* pcg_beta_n) { CHECK_CUDA_NO_ERROR(); if (unknown_count == 0) { return; } CUDA_AUTO_TUNE_1D_TEMPLATED( PCGStep3CUDAKernel, 1024, unknown_count, 0, stream, TEMPLATE_ARGUMENTS(block_width), /* kernel parameters */ unknown_count, *pcg_g, *pcg_p, *pcg_alpha_n, *pcg_beta_n); CHECK_CUDA_NO_ERROR(); } }
88f9942e725d482f2aaf3b2f17a935b11d975db7.cu
// Copyright 2019 ETH Zürich, Thomas Schöps // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #include "camera_calibration/bundle_adjustment/cuda_joint_optimization.cuh" #include <cub/cub.cuh> #include <cuda_runtime.h> #include <libvis/cuda/cuda_auto_tuner.h> #include <libvis/cuda/cuda_util.h> #include <math_constants.h> #include "camera_calibration/bundle_adjustment/joint_optimization_jacobians.h" #include "camera_calibration/cuda/cuda_matrix.cuh" #include "camera_calibration/cuda/cuda_util.cuh" #include "camera_calibration/models/cuda_central_generic_model.cuh" namespace vis { /* * Schema for accumulator classes: * * struct Accumulator { * /// Called if the residuals belonging to this thread are invalid. * /// This is only called once and before any SetJacobianComponent() call, * /// except for calls to SetJacobianComponent_AllThreadsSameIndex() with valid == false. * __forceinline__ __device__ void SetResidualsInvalid(PCGScalar* features_residual_x, u32 feature_index); * * /// Called if the residuals belonging to this thread are valid. * /// This is only called once and before any SetJacobianComponent() call, * /// except for calls to SetJacobianComponent_AllThreadsSameIndex() with valid == false. * __forceinline__ __device__ void SetResiduals(PCGScalar residual_x, PCGScalar residual_y, PCGScalar* features_residual_x, PCGScalar* features_residual_y, u32 feature_index); * * /// Sets the values of one column in the [2 x N] Jacobian of the pixel position * /// wrt. the unknowns. I.e., value_x corresponds to the x-residual (row 0) and value_y * /// to the y-residual (row 1). * /// * /// This version is called if there are no possible conflicts between * /// different threads in the kernel call, i.e., for a given thread, no other * /// thread in the thread grid possibly writes to the same index. * /// * /// In case the residuals are invalid (SetResidualsInvalid() has been called * /// before), this function must not be called. 
* __forceinline__ __device__ void SetJacobianComponent_ThreadConflictFree(u32 index, PCGScalar jac_x, PCGScalar jac_y); * * /// See SetJacobianComponent_ThreadConflictFree(). This version is used * /// if all threads in the thread block write to the same index. * /// * /// This variant of SetJacobianComponent() is called both if the residuals * /// are valid and if they are invalid to enable block-wide operations. * /// * /// NOTE: If the temp_storage is used before, a __syncthreads() has to be done. * __forceinline__ __device__ void SetJacobianComponent_AllThreadsSameIndex(u32 index, PCGScalar jac_x, PCGScalar jac_y, bool valid); * * /// See SetJacobianComponent_ThreadConflictFree(). This version is used * /// if none of the other two versions applies. * /// * /// In case the residuals are invalid (SetResidualsInvalid() has been called * /// before), this function must not be called. * __forceinline__ __device__ void SetJacobianComponent_RandomThreadConflicts(u32 index, PCGScalar jac_x, PCGScalar jac_y); * }; */ constexpr PCGScalar kHuberWeight = 1.0; // TODO: Make parameter constexpr int kResidualJacobianBlockSize = 256; template<int block_width, int block_height, bool compute_jacobians, bool are_camera_tr_rig_in_state, class Model, class Accumulator> __device__ void ComputeResidualAndJacobian( bool valid, u32 feature_index, CUDADatasetAndState& s, Model& model, Accumulator& accumulator) { u16 point_index = s.features_index[feature_index]; float3 point = make_float3(s.points[0][point_index], s.points[1][point_index], s.points[2][point_index]); u16 image_index = s.features_image[feature_index]; float3 local_point = s.image_tr_global[image_index] * point; float2 pixel = make_float2(0.5f * (model.calibration_min_x() + model.calibration_max_x() + 1), 0.5f * (model.calibration_min_y() + model.calibration_max_y() + 1)); if (!model.ProjectWithInitialEstimate(local_point, &pixel)) { if (valid) { accumulator.SetResidualsInvalid(s.features_residual_x, feature_index); } valid = false; } if (valid) { accumulator.SetResiduals( pixel.x - s.features_x[feature_index], pixel.y - s.features_y[feature_index], s.features_residual_x, s.features_residual_y, feature_index); } if (compute_jacobians) { // Compute Jacobian wrt. image pose, optionally camera_tr_rig, and point position [2 x (6 + (rig ? 6 : 0) + 3)]. // Residual: Project(exp(delta) * image_tr_pattern * pattern_point) - measurement // Compute Jacobian as follows: // (d pixel) / (d local_point) [2 x 3], numerical // * (d local_point) / (d pose and global_point) [3 x (7 + (rig ? 7 : 0) + 3)], analytical // Numerical part: CUDAMatrix<PCGScalar, 2, 3> pixel_wrt_local_point; const PCGScalar kDelta = s.numerical_diff_delta * (model.is_central_camera_model() ? 
Norm(local_point) : 0.1); #pragma unroll for (int dimension = 0; dimension < 3; ++ dimension) { float3 offset_point = local_point; *(&offset_point.x + dimension) += kDelta; float2 offset_pixel = pixel; if (!model.ProjectWithInitialEstimate(offset_point, &offset_pixel)) { valid = false; break; } pixel_wrt_local_point(0, dimension) = (offset_pixel.x - pixel.x) / kDelta; pixel_wrt_local_point(1, dimension) = (offset_pixel.y - pixel.y) / kDelta; } // Analytical part: CUDAMatrix<PCGScalar, 3, 7 + 7 + 3> local_point_wrt_poses_and_global_point; if (are_camera_tr_rig_in_state) { ComputeRigJacobian( s.camera_q_rig[4 * s.camera_index + 0], s.camera_q_rig[4 * s.camera_index + 1], s.camera_q_rig[4 * s.camera_index + 2], s.camera_q_rig[4 * s.camera_index + 3], point.x, point.y, point.z, s.rig_tr_global[7 * image_index + 0], s.rig_tr_global[7 * image_index + 1], s.rig_tr_global[7 * image_index + 2], s.rig_tr_global[7 * image_index + 3], s.rig_tr_global[7 * image_index + 4], s.rig_tr_global[7 * image_index + 5], s.rig_tr_global[7 * image_index + 6], local_point_wrt_poses_and_global_point.row(0), local_point_wrt_poses_and_global_point.row(1), local_point_wrt_poses_and_global_point.row(2)); } else { // NOTE: The first row expects image_q_global values. Thus, here we assume // that rig_q_global == image_q_global, i.e., the camera_q_rig // transformation is identity. ComputeJacobian( s.rig_tr_global[7 * image_index + 0], s.rig_tr_global[7 * image_index + 1], s.rig_tr_global[7 * image_index + 2], s.rig_tr_global[7 * image_index + 3], point.x, point.y, point.z, local_point_wrt_poses_and_global_point.row(0), local_point_wrt_poses_and_global_point.row(1), local_point_wrt_poses_and_global_point.row(2)); } CUDAMatrix<PCGScalar, 2, 6> pose_jacobian; CUDAMatrix<PCGScalar, 2, 6> rig_jacobian; CUDAMatrix<PCGScalar, 2, 3> point_jacobian; if (are_camera_tr_rig_in_state) { // local_point_wrt_poses_and_global_point contains the Jacobian wrt.: // - rig_tr_global (indices 0 .. 6) // - camera_tr_rig (indices 7 .. 13) // - global_point (indices 14 .. 16) CUDAMatrix<PCGScalar, 4, 3> camera_q_rig_wrt_update; QuaternionJacobianWrtLocalUpdate(s.camera_q_rig[4 * s.camera_index + 0], s.camera_q_rig[4 * s.camera_index + 1], s.camera_q_rig[4 * s.camera_index + 2], s.camera_q_rig[4 * s.camera_index + 3], &camera_q_rig_wrt_update); CUDAMatrix<PCGScalar, 4, 3> rig_q_global_wrt_update; QuaternionJacobianWrtLocalUpdate(s.rig_tr_global[7 * image_index + 0], s.rig_tr_global[7 * image_index + 1], s.rig_tr_global[7 * image_index + 2], s.rig_tr_global[7 * image_index + 3], &rig_q_global_wrt_update); CUDAMatrix<PCGScalar, 2, 4> temp; MatrixMultiply(temp, pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<4>(0)); MatrixMultiply(pose_jacobian.cols<3>(0), temp, rig_q_global_wrt_update); MatrixMultiply(pose_jacobian.cols<3>(3), pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<3>(0 + 4)); MatrixMultiply(temp, pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<4>(7)); MatrixMultiply(rig_jacobian.cols<3>(0), temp, camera_q_rig_wrt_update); MatrixMultiply(rig_jacobian.cols<3>(3), pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<3>(7 + 4)); MatrixMultiply(point_jacobian, pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<3>(14)); } else { // local_point_wrt_poses_and_global_point contains the Jacobian wrt.: // - rig_tr_global (indices 0 .. 6) // - global_point (indices 7 .. 
9) // NOTE: Here, we assume that rig_q_global == image_q_global, i.e., the // camera_q_rig transformation is identity. CUDAMatrix<PCGScalar, 4, 3> quaternion_wrt_update; // derived in derive_jacobians.py QuaternionJacobianWrtLocalUpdate(s.rig_tr_global[7 * image_index + 0], s.rig_tr_global[7 * image_index + 1], s.rig_tr_global[7 * image_index + 2], s.rig_tr_global[7 * image_index + 3], &quaternion_wrt_update); CUDAMatrix<PCGScalar, 2, 4> temp; MatrixMultiply(temp, pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<4>(0)); MatrixMultiply(pose_jacobian.cols<3>(0), temp, quaternion_wrt_update); MatrixMultiply(pose_jacobian.cols<3>(3), pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<3>(4)); MatrixMultiply(point_jacobian, pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<3>(7)); } // Get the model Jacobian constexpr int num_intrinsic_variables = Model::IntrinsicsJacobianSize; CUDAMatrix<u32, num_intrinsic_variables, 1> grid_update_indices; CUDAMatrix<PCGScalar, 2, num_intrinsic_variables> intrinsic_jac; if (!model.ProjectionJacobianWrtIntrinsics( local_point, pixel, s.numerical_diff_delta, grid_update_indices.data(), intrinsic_jac.row(0), intrinsic_jac.row(1))) { valid = false; } // Accumulate Jacobians: if (are_camera_tr_rig_in_state) { if (valid) { for (int i = 0; valid && i < 6; ++ i) { accumulator.SetJacobianComponent_RandomThreadConflicts( s.rig_tr_global_start_index + 6 * image_index + i, pose_jacobian(0, i), pose_jacobian(1, i)); } } for (int i = 0; i < 6; ++ i) { accumulator.SetJacobianComponent_AllThreadsSameIndex( s.camera_tr_rig_start_index + s.camera_index * 6 + i, rig_jacobian(0, i), rig_jacobian(1, i), valid); } if (valid) { for (int i = 0; i < 3; ++ i) { accumulator.SetJacobianComponent_RandomThreadConflicts( s.points_start_index + point_index * 3 + i, point_jacobian(0, i), point_jacobian(1, i)); } } } else { if (valid) { for (int i = 0; i < 6; ++ i) { accumulator.SetJacobianComponent_RandomThreadConflicts( s.rig_tr_global_start_index + 6 * image_index + i, pose_jacobian(0, i), pose_jacobian(1, i)); } for (int i = 0; i < 3; ++ i) { accumulator.SetJacobianComponent_RandomThreadConflicts( s.points_start_index + point_index * 3 + i, point_jacobian(0, i), point_jacobian(1, i)); } } } if (valid) { for (int i = 0; i < num_intrinsic_variables; ++ i) { accumulator.SetJacobianComponent_RandomThreadConflicts( s.intrinsic_start_index + grid_update_indices(i), intrinsic_jac(0, i), intrinsic_jac(1, i)); } } } } template <int block_width, int block_height> struct PCGCompareCostAccumulator { __forceinline__ __device__ PCGCompareCostAccumulator( typename cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage) : temp_storage_(temp_storage) {} __forceinline__ __device__ void SetResidualsInvalid(PCGScalar* features_residual_x, u32 feature_index) { // No need to do anything, as these residuals do not matter for the comparison } __forceinline__ __device__ void SetResiduals(PCGScalar residual_x, PCGScalar residual_y, PCGScalar* features_residual_x, PCGScalar* features_residual_y, u32 feature_index) { PCGScalar this_cost = ComputeHuberCost(residual_x, residual_y, kHuberWeight); cost_ += this_cost; if (::isnan(features_residual_x[feature_index])) { // These residuals were invalid for the other cost, so ignore them for the comparison return; } // Both in the old and the new state, the residuals are valid. Compare them. 
PCGScalar other_cost = ComputeHuberCost(features_residual_x[feature_index], features_residual_y[feature_index], kHuberWeight); relative_cost_ += this_cost - other_cost; } __forceinline__ __device__ void SetJacobianComponent_ThreadConflictFree(u32 /*index*/, PCGScalar /*jac_x*/, PCGScalar /*jac_y*/) {} __forceinline__ __device__ void SetJacobianComponent_AllThreadsSameIndex(u32 /*index*/, PCGScalar /*jac_x*/, PCGScalar /*jac_y*/, bool /*valid*/) {} __forceinline__ __device__ void SetJacobianComponent_RandomThreadConflicts(u32 /*index*/, PCGScalar /*jac_x*/, PCGScalar /*jac_y*/) {} PCGScalar cost_ = 0; PCGScalar relative_cost_ = 0; typename cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage_; }; template<int block_width, class Model, bool are_camera_tr_rig_in_state> __global__ void __launch_bounds__(/*maxThreadsPerBlock*/ kResidualJacobianBlockSize, /*minBlocksPerMultiprocessor*/ 1) PCGCompareCostCUDAKernel( CUDADatasetAndState s, Model model, CUDABuffer_<PCGScalar> pcg_cost, CUDABuffer_<PCGScalar> pcg_relative_cost) { unsigned int feature_index = blockIdx.x * blockDim.x + threadIdx.x; bool valid = feature_index < s.num_features; if (!valid) { feature_index = 0; } constexpr int block_height = 1; typedef cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; PCGCompareCostAccumulator<block_width, block_height> accumulator(&temp_storage); ComputeResidualAndJacobian<block_width, block_height, /*compute_jacobians*/ false, are_camera_tr_rig_in_state>(valid, feature_index, s, model, accumulator); __syncthreads(); BlockedAtomicSum<block_width, block_height>( &pcg_cost(0, 0), accumulator.cost_, valid, &temp_storage); __syncthreads(); BlockedAtomicSum<block_width, block_height>( &pcg_relative_cost(0, 0), accumulator.relative_cost_, valid, &temp_storage); } template <class Model> void PCGCompareCostCUDA( cudaStream_t stream, const CUDADatasetAndState& s, const Model& model, CUDABuffer_<PCGScalar>* pcg_cost, CUDABuffer_<PCGScalar>* pcg_relative_cost) { CHECK_CUDA_NO_ERROR(); if (s.num_features == 0) { return; } constexpr int block_width = kResidualJacobianBlockSize; dim3 grid_dim(GetBlockCount(s.num_features, block_width)); dim3 block_dim(block_width); bool are_camera_tr_rig_in_state = s.are_camera_tr_rig_in_state; COMPILE_OPTION( are_camera_tr_rig_in_state, PCGCompareCostCUDAKernel<block_width, Model, _are_camera_tr_rig_in_state> <<<grid_dim, block_dim, 0, stream>>>( s, model, *pcg_cost, *pcg_relative_cost);); CHECK_CUDA_NO_ERROR(); } template void PCGCompareCostCUDA<CUDACentralGenericModel>( cudaStream_t stream, const CUDADatasetAndState& s, const CUDACentralGenericModel& model, CUDABuffer_<PCGScalar>* pcg_cost, CUDABuffer_<PCGScalar>* pcg_relative_cost); template <int block_width, int block_height> struct PCGInitAccumulator { __forceinline__ __device__ PCGInitAccumulator( CUDABuffer_<PCGScalar>* pcg_r, CUDABuffer_<PCGScalar>* pcg_M, typename cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage) : pcg_r_(pcg_r), pcg_M_(pcg_M), temp_storage_(temp_storage) {} __forceinline__ __device__ void SetResidualsInvalid(PCGScalar* features_residual_x, u32 feature_index) { features_residual_x[feature_index] = CUDART_NAN_F; // features_residual_y[feature_index] = CUDART_NAN_F; } __forceinline__ __device__ void SetResiduals(PCGScalar residual_x, 
PCGScalar residual_y, PCGScalar* features_residual_x, PCGScalar* features_residual_y, u32 feature_index) { features_residual_x[feature_index] = residual_x; features_residual_y[feature_index] = residual_y; // Cache residuals and weights weight_ = ComputeHuberWeight(residual_x, residual_y, kHuberWeight); weighted_residual_x_ = weight_ * residual_x; weighted_residual_y_ = weight_ * residual_y; cost_ += ComputeHuberCost(residual_x, residual_y, kHuberWeight); } __forceinline__ __device__ void SetJacobianComponent_ThreadConflictFree(u32 index, PCGScalar jac_x, PCGScalar jac_y) { (*pcg_r_)(0, index) -= jac_x * weighted_residual_x_ + jac_y * weighted_residual_y_; (*pcg_M_)(0, index) += jac_x * weight_ * jac_x + jac_y * weight_ * jac_y; } __forceinline__ __device__ void SetJacobianComponent_AllThreadsSameIndex(u32 index, PCGScalar jac_x, PCGScalar jac_y, bool valid) { BlockedAtomicSum<block_width, block_height>( &(*pcg_r_)(0, index), -1 * jac_x * weighted_residual_x_ + -1 * jac_y * weighted_residual_y_, valid, temp_storage_); __syncthreads(); BlockedAtomicSum<block_width, block_height>( &(*pcg_M_)(0, index), jac_x * weight_ * jac_x + jac_y * weight_ * jac_y, valid, temp_storage_); __syncthreads(); } __forceinline__ __device__ void SetJacobianComponent_RandomThreadConflicts(u32 index, PCGScalar jac_x, PCGScalar jac_y) { atomicAddFloatOrDouble( &(*pcg_r_)(0, index), -1 * jac_x * weighted_residual_x_ + -1 * jac_y * weighted_residual_y_); atomicAddFloatOrDouble( &(*pcg_M_)(0, index), jac_x * weight_ * jac_x + jac_y * weight_ * jac_y); } PCGScalar weight_; PCGScalar weighted_residual_x_; PCGScalar weighted_residual_y_; PCGScalar cost_ = 0; CUDABuffer_<PCGScalar>* pcg_r_; CUDABuffer_<PCGScalar>* pcg_M_; typename cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage_; }; template<int block_width, class Model, bool are_camera_tr_rig_in_state> __global__ void __launch_bounds__(/*maxThreadsPerBlock*/ kResidualJacobianBlockSize, /*minBlocksPerMultiprocessor*/ 1) PCGInitCUDAKernel( CUDADatasetAndState s, Model model, CUDABuffer_<PCGScalar> pcg_r, CUDABuffer_<PCGScalar> pcg_M, CUDABuffer_<PCGScalar> pcg_cost) { unsigned int feature_index = blockIdx.x * blockDim.x + threadIdx.x; bool valid = feature_index < s.num_features; if (!valid) { feature_index = 0; } constexpr int block_height = 1; typedef cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; PCGInitAccumulator<block_width, block_height> accumulator(&pcg_r, &pcg_M, &temp_storage); ComputeResidualAndJacobian<block_width, block_height, /*compute_jacobians*/ true, are_camera_tr_rig_in_state>(valid, feature_index, s, model, accumulator); __syncthreads(); BlockedAtomicSum<block_width, block_height>( &pcg_cost(0, 0), accumulator.cost_, valid, &temp_storage); } template <class Model> void PCGInitCUDA( cudaStream_t stream, const CUDADatasetAndState& s, const Model& model, CUDABuffer_<PCGScalar>* pcg_r, CUDABuffer_<PCGScalar>* pcg_M, CUDABuffer_<PCGScalar>* pcg_cost) { CHECK_CUDA_NO_ERROR(); if (s.num_features == 0) { return; } constexpr int block_width = kResidualJacobianBlockSize; dim3 grid_dim(GetBlockCount(s.num_features, block_width)); dim3 block_dim(block_width); bool are_camera_tr_rig_in_state = s.are_camera_tr_rig_in_state; COMPILE_OPTION( are_camera_tr_rig_in_state, PCGInitCUDAKernel<block_width, Model, _are_camera_tr_rig_in_state> <<<grid_dim, block_dim, 0, 
stream>>>( s, model, *pcg_r, *pcg_M, *pcg_cost);); CHECK_CUDA_NO_ERROR(); } template void PCGInitCUDA<CUDACentralGenericModel>( cudaStream_t stream, const CUDADatasetAndState& s, const CUDACentralGenericModel& model, CUDABuffer_<PCGScalar>* pcg_r, CUDABuffer_<PCGScalar>* pcg_M, CUDABuffer_<PCGScalar>* pcg_cost); template<int block_width> __global__ void PCGInit2CUDAKernel( u32 unknown_count, PCGScalar lambda, CUDABuffer_<PCGScalar> pcg_r, CUDABuffer_<PCGScalar> pcg_M, CUDABuffer_<PCGScalar> pcg_delta, CUDABuffer_<PCGScalar> pcg_g, CUDABuffer_<PCGScalar> pcg_p, CUDABuffer_<PCGScalar> pcg_alpha_n) { unsigned int unknown_index = blockIdx.x * blockDim.x + threadIdx.x; constexpr int block_height = 1; typedef cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; PCGScalar alpha_term; if (unknown_index < unknown_count) { pcg_g(0, unknown_index) = 0; // p_0 = M^-1 r_0 // The addition of lambda is also handled here. PCGScalar r_value = pcg_r(0, unknown_index); PCGScalar p_value = r_value / (pcg_M(0, unknown_index) + lambda); pcg_p(0, unknown_index) = p_value; // delta_0 = 0 pcg_delta(0, unknown_index) = 0; // alpha_n_0 = r_0^T p_0 alpha_term = r_value * p_value; } BlockedAtomicSum<block_width, block_height>( &pcg_alpha_n(0, 0), alpha_term, unknown_index < unknown_count, &temp_storage); } void PCGInit2CUDA( cudaStream_t stream, u32 unknown_count, PCGScalar lambda, const CUDABuffer_<PCGScalar>& pcg_r, const CUDABuffer_<PCGScalar>& pcg_M, CUDABuffer_<PCGScalar>* pcg_delta, CUDABuffer_<PCGScalar>* pcg_g, CUDABuffer_<PCGScalar>* pcg_p, CUDABuffer_<PCGScalar>* pcg_alpha_n) { CHECK_CUDA_NO_ERROR(); if (unknown_count == 0) { return; } cudaMemsetAsync(pcg_alpha_n->address(), 0, 1 * sizeof(PCGScalar), stream); CUDA_AUTO_TUNE_1D_TEMPLATED( PCGInit2CUDAKernel, 1024, unknown_count, 0, stream, TEMPLATE_ARGUMENTS(block_width), /* kernel parameters */ unknown_count, lambda, pcg_r, pcg_M, *pcg_delta, *pcg_g, *pcg_p, *pcg_alpha_n); CHECK_CUDA_NO_ERROR(); } template <int block_width, int block_height> struct PCGStep1SumAccumulator { __forceinline__ __device__ PCGStep1SumAccumulator(const CUDABuffer_<PCGScalar>& pcg_p) : pcg_p_(pcg_p) {} __forceinline__ __device__ void SetResidualsInvalid(PCGScalar* features_residual_x, u32 feature_index) { // Do nothing } __forceinline__ __device__ void SetResiduals(PCGScalar residual_x, PCGScalar residual_y, PCGScalar* features_residual_x, PCGScalar* features_residual_y, u32 feature_index) { // Cache weights weight_ = ComputeHuberWeight(residual_x, residual_y, kHuberWeight); } __forceinline__ __device__ void SetJacobianComponent_ThreadConflictFree(u32 index, PCGScalar jac_x, PCGScalar jac_y) { PCGScalar p = pcg_p_(0, index); sum_x_ += jac_x * p; sum_y_ += jac_y * p; } __forceinline__ __device__ void SetJacobianComponent_AllThreadsSameIndex(u32 index, PCGScalar jac_x, PCGScalar jac_y, bool valid) { if (valid) { PCGScalar p = pcg_p_(0, index); sum_x_ += jac_x * p; sum_y_ += jac_y * p; } } __forceinline__ __device__ void SetJacobianComponent_RandomThreadConflicts(u32 index, PCGScalar jac_x, PCGScalar jac_y) { PCGScalar p = pcg_p_(0, index); sum_x_ += jac_x * p; sum_y_ += jac_y * p; } PCGScalar sum_x_ = 0; // holds the result of (J * p) for the row of the first residual. PCGScalar sum_y_ = 0; // holds the result of (J * p) for the row of the second residual. 
PCGScalar weight_ = 0; const CUDABuffer_<PCGScalar>& pcg_p_; }; template <int block_width, int block_height> struct PCGStep1ResolveAccumulator { __forceinline__ __device__ PCGStep1ResolveAccumulator( PCGScalar sum_x, PCGScalar sum_y, CUDABuffer_<PCGScalar>* pcg_g, typename cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage) : sum_x_(sum_x), sum_y_(sum_y), pcg_g_(pcg_g), temp_storage_(temp_storage) {} __forceinline__ __device__ void SetResidualsInvalid(PCGScalar* features_residual_x, u32 feature_index) { // Do nothing } __forceinline__ __device__ void SetResiduals(PCGScalar residual_x, PCGScalar residual_y, PCGScalar* features_residual_x, PCGScalar* features_residual_y, u32 feature_index) { // Do nothing } __forceinline__ __device__ void SetJacobianComponent_ThreadConflictFree(u32 index, PCGScalar jac_x, PCGScalar jac_y) { (*pcg_g_)(0, index) += jac_x * sum_x_ + jac_y * sum_y_; } __forceinline__ __device__ void SetJacobianComponent_AllThreadsSameIndex(u32 index, PCGScalar jac_x, PCGScalar jac_y, bool valid) { BlockedAtomicSum<block_width, block_height>( &(*pcg_g_)(0, index), jac_x * sum_x_ + jac_y * sum_y_, valid, temp_storage_); __syncthreads(); } __forceinline__ __device__ void SetJacobianComponent_RandomThreadConflicts(u32 index, PCGScalar jac_x, PCGScalar jac_y) { atomicAddFloatOrDouble( &(*pcg_g_)(0, index), jac_x * sum_x_ + jac_y * sum_y_); } PCGScalar sum_x_; PCGScalar sum_y_; CUDABuffer_<PCGScalar>* pcg_g_; typename cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage_; }; template<int block_width, class Model, bool are_camera_tr_rig_in_state> __global__ void __launch_bounds__(/*maxThreadsPerBlock*/ kResidualJacobianBlockSize, /*minBlocksPerMultiprocessor*/ 1) PCGStep1CUDAKernel( CUDADatasetAndState s, Model model, CUDABuffer_<PCGScalar> pcg_p, CUDABuffer_<PCGScalar> pcg_g, CUDABuffer_<PCGScalar> pcg_alpha_d) { unsigned int feature_index = blockIdx.x * blockDim.x + threadIdx.x; bool valid = feature_index < s.num_features; if (!valid) { feature_index = 0; } constexpr int block_height = 1; typedef cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; PCGScalar weight; PCGScalar sum_x; PCGScalar sum_y; { PCGStep1SumAccumulator<block_width, block_height> accumulator(pcg_p); ComputeResidualAndJacobian<block_width, block_height, /*compute_jacobians*/ true, are_camera_tr_rig_in_state>(valid, feature_index, s, model, accumulator); weight = accumulator.weight_; sum_x = accumulator.sum_x_; sum_y = accumulator.sum_y_; } BlockedAtomicSum<block_width, block_height>( &pcg_alpha_d(0, 0), sum_x * weight * sum_x + sum_y * weight * sum_y, valid, &temp_storage); sum_x *= weight; sum_y *= weight; __syncthreads(); // TODO: Try storing sum_x and sum_y in global memory here and moving the // part below into its own kernel. It might be faster since it might be // possible to run one of the two resulting kernels with higher // parallelism than the current large kernel. 
{ PCGStep1ResolveAccumulator<block_width, block_height> accumulator(sum_x, sum_y, &pcg_g, &temp_storage); ComputeResidualAndJacobian<block_width, block_height, /*compute_jacobians*/ true, are_camera_tr_rig_in_state>(valid, feature_index, s, model, accumulator); } } template<int block_width> __global__ void AddAlphaDEpsilonTermsCUDAKernel( u32 unknown_count, PCGScalar lambda, CUDABuffer_<PCGScalar> pcg_p, CUDABuffer_<PCGScalar> pcg_alpha_d) { unsigned int unknown_index = blockIdx.x * blockDim.x + threadIdx.x; bool valid = unknown_index < unknown_count; if (!valid) { unknown_index = unknown_count - 1; } PCGScalar p_value = pcg_p(0, unknown_index); PCGScalar term = lambda * p_value * p_value; constexpr int block_height = 1; typedef cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; BlockedAtomicSum<block_width, block_height>( &pcg_alpha_d(0, 0), term, valid, &temp_storage); } template <class Model> void PCGStep1CUDA( cudaStream_t stream, u32 unknown_count, const CUDADatasetAndState& s, const Model& model, CUDABuffer_<PCGScalar>* pcg_p, CUDABuffer_<PCGScalar>* pcg_g, CUDABuffer_<PCGScalar>* pcg_alpha_d) { CHECK_CUDA_NO_ERROR(); if (s.num_features == 0) { return; } constexpr int block_width = kResidualJacobianBlockSize; dim3 grid_dim(GetBlockCount(s.num_features, block_width)); dim3 block_dim(block_width); bool are_camera_tr_rig_in_state = s.are_camera_tr_rig_in_state; COMPILE_OPTION( are_camera_tr_rig_in_state, PCGStep1CUDAKernel<block_width, Model, _are_camera_tr_rig_in_state> <<<grid_dim, block_dim, 0, stream>>>( s, model, *pcg_p, *pcg_g, *pcg_alpha_d);); CHECK_CUDA_NO_ERROR(); } template void PCGStep1CUDA<CUDACentralGenericModel>( cudaStream_t stream, u32 unknown_count, const CUDADatasetAndState& s, const CUDACentralGenericModel& model, CUDABuffer_<PCGScalar>* pcg_p, CUDABuffer_<PCGScalar>* pcg_g, CUDABuffer_<PCGScalar>* pcg_alpha_d); template<int block_width> __global__ void PCGStep2CUDAKernel( u32 unknown_count, PCGScalar lambda, CUDABuffer_<PCGScalar> pcg_r, CUDABuffer_<PCGScalar> pcg_M, CUDABuffer_<PCGScalar> pcg_delta, CUDABuffer_<PCGScalar> pcg_g, CUDABuffer_<PCGScalar> pcg_p, CUDABuffer_<PCGScalar> pcg_alpha_n, CUDABuffer_<PCGScalar> pcg_alpha_d, CUDABuffer_<PCGScalar> pcg_beta_n) { unsigned int unknown_index = blockIdx.x * blockDim.x + threadIdx.x; PCGScalar beta_term; constexpr int block_height = 1; typedef cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; if (unknown_index < unknown_count) { // TODO: Default to 1 or to 0 if denominator is near-zero? Stop optimization if that happens? PCGScalar alpha = (pcg_alpha_d(0, 0) >= 1e-35f) ? (pcg_alpha_n(0, 0) / pcg_alpha_d(0, 0)) : 0; PCGScalar p_value = pcg_p(0, unknown_index); pcg_delta(0, unknown_index) += alpha * p_value; PCGScalar r_value = pcg_r(0, unknown_index); r_value -= alpha * (pcg_g(0, unknown_index) + lambda * p_value); pcg_r(0, unknown_index) = r_value; // This is called z in the Opt paper, but stored in g here to save memory. 
PCGScalar z_value = r_value / (pcg_M(0, unknown_index) + lambda); pcg_g(0, unknown_index) = z_value; beta_term = z_value * r_value; } BlockedAtomicSum<block_width, block_height>( &pcg_beta_n(0, 0), beta_term, unknown_index < unknown_count, &temp_storage); } void PCGStep2CUDA( cudaStream_t stream, u32 unknown_count, PCGScalar lambda, CUDABuffer_<PCGScalar>* pcg_r, const CUDABuffer_<PCGScalar>& pcg_M, CUDABuffer_<PCGScalar>* pcg_delta, CUDABuffer_<PCGScalar>* pcg_g, CUDABuffer_<PCGScalar>* pcg_p, CUDABuffer_<PCGScalar>* pcg_alpha_n, CUDABuffer_<PCGScalar>* pcg_alpha_d, CUDABuffer_<PCGScalar>* pcg_beta_n) { CHECK_CUDA_NO_ERROR(); if (unknown_count == 0) { return; } CUDA_AUTO_TUNE_1D_TEMPLATED( AddAlphaDEpsilonTermsCUDAKernel, 1024, unknown_count, 0, stream, TEMPLATE_ARGUMENTS(block_width), /* kernel parameters */ unknown_count, lambda, *pcg_p, *pcg_alpha_d); CHECK_CUDA_NO_ERROR(); cudaMemsetAsync(pcg_beta_n->address(), 0, 1 * sizeof(PCGScalar), stream); CUDA_AUTO_TUNE_1D_TEMPLATED( PCGStep2CUDAKernel, 1024, unknown_count, 0, stream, TEMPLATE_ARGUMENTS(block_width), /* kernel parameters */ unknown_count, lambda, *pcg_r, pcg_M, *pcg_delta, *pcg_g, *pcg_p, *pcg_alpha_n, *pcg_alpha_d, *pcg_beta_n); CHECK_CUDA_NO_ERROR(); } template<int block_width> __global__ void PCGStep3CUDAKernel( u32 unknown_count, CUDABuffer_<PCGScalar> pcg_g, CUDABuffer_<PCGScalar> pcg_p, CUDABuffer_<PCGScalar> pcg_alpha_n, CUDABuffer_<PCGScalar> pcg_beta_n) { unsigned int unknown_index = blockIdx.x * blockDim.x + threadIdx.x; if (unknown_index < unknown_count) { // TODO: Default to 1 or to 0 if denominator is near-zero? Stop optimization if that happens? PCGScalar beta = (pcg_alpha_n(0, 0) >= 1e-35f) ? (pcg_beta_n(0, 0) / pcg_alpha_n(0, 0)) : 0; pcg_p(0, unknown_index) = pcg_g/*z*/(0, unknown_index) + beta * pcg_p(0, unknown_index); } } void PCGStep3CUDA( cudaStream_t stream, u32 unknown_count, CUDABuffer_<PCGScalar>* pcg_g, CUDABuffer_<PCGScalar>* pcg_p, CUDABuffer_<PCGScalar>* pcg_alpha_n, CUDABuffer_<PCGScalar>* pcg_beta_n) { CHECK_CUDA_NO_ERROR(); if (unknown_count == 0) { return; } CUDA_AUTO_TUNE_1D_TEMPLATED( PCGStep3CUDAKernel, 1024, unknown_count, 0, stream, TEMPLATE_ARGUMENTS(block_width), /* kernel parameters */ unknown_count, *pcg_g, *pcg_p, *pcg_alpha_n, *pcg_beta_n); CHECK_CUDA_NO_ERROR(); } }
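The calibration pair above shows what hipify actually rewrites in a large file: only the runtime API changes (cudaStream_t to hipStream_t, cudaMemsetAsync to hipMemsetAsync, cuda_runtime.h to hip/hip_runtime.h), while the __global__ kernels such as PCGStep2CUDAKernel and PCGStep3CUDAKernel carry over unchanged. The following is a minimal, self-contained CUDA sketch of the zero-then-accumulate pattern those PCG steps use around cudaMemsetAsync; all names and the plain atomicAdd reduction are illustrative stand-ins, not code from the calibration sources.

#include <cuda_runtime.h>
#include <cstdio>

// Illustrative stand-in for the accumulator pattern in PCGInit2CUDA /
// PCGStep2CUDA: clear a one-element buffer asynchronously, reduce into it,
// then read it back on the same stream.
__global__ void SumKernel(const float* in, float* accum, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        atomicAdd(accum, in[i]);  // the real code batches this via cub::BlockReduce
    }
}

int main() {
    const int n = 1 << 10;
    float* d_in = nullptr;
    float* d_accum = nullptr;
    cudaStream_t stream;                                   // hipify: hipStream_t
    cudaStreamCreate(&stream);                             // hipify: hipStreamCreate
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_accum, sizeof(float));
    cudaMemsetAsync(d_in, 0, n * sizeof(float), stream);
    cudaMemsetAsync(d_accum, 0, sizeof(float), stream);    // hipify: hipMemsetAsync
    SumKernel<<<(n + 255) / 256, 256, 0, stream>>>(d_in, d_accum, n);
    float result = 0.0f;
    cudaMemcpyAsync(&result, d_accum, sizeof(float), cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);                         // hipify: hipStreamSynchronize
    printf("sum = %f\n", result);
    cudaFree(d_in);
    cudaFree(d_accum);
    cudaStreamDestroy(stream);
    return 0;
}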
92b6a20bed040dd15df61773c0d7a0ed10f0b3b4.hip
// !!! This is a file automatically generated by hipify!!!
#include "HostArray.h"

#include <hip/hip_runtime.h>

#include "error.h"

namespace cutw
{
namespace detail
{

void host_allocate(void*& data, const std::size_t bytes)
{
    CUTW_CUASSERT(hipHostMalloc(&data, bytes));
}

void host_free(void* const data)
{
    CUTW_CUASSERT(hipHostFree(data));
}

}
}
92b6a20bed040dd15df61773c0d7a0ed10f0b3b4.cu
#include "HostArray.h" #include <cuda.h> #include "error.h" namespace cutw { namespace detail { void host_allocate(void*& data, const std::size_t bytes) { CUTW_CUASSERT(cudaMallocHost(&data, bytes)); } void host_free(void* const data) { CUTW_CUASSERT(cudaFreeHost(data)); } } }
ec88944e52bd7ff8db274ed94c6fa9fed269f3a9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<iostream>
#include<stdio.h>

__global__ void factorialKernel()
{
    // each thread computes factorial = n! for n = threadIdx.x + 1 and prints it
    int factorial = 1;
    int n = threadIdx.x+1;
    for(int i = 1; i <= n; ++i)
    {
        factorial *= i;
    }
    printf("%d!=%d\n", n, factorial);
}

int main()
{
    //invoke GPU kernel, with one block that has eight threads
    hipLaunchKernelGGL(( factorialKernel), dim3(1),dim3(8), 0, 0, );
    hipDeviceSynchronize();

    //bring the result back from the GPU into the hostArray
    // hipMemcpy(&hostArray, devArray, sizeof(int) * numElems, hipMemcpyDeviceToHost);

    // print out the result to confirm that things are looking good
    //std::printf("here\n");

    //release the memory allocated on the GPU
    //hipFree(devArray);
    return 0;
}
ec88944e52bd7ff8db274ed94c6fa9fed269f3a9.cu
#include<cuda.h>
#include<iostream>
#include<stdio.h>

__global__ void factorialKernel()
{
    // each thread computes factorial = n! for n = threadIdx.x + 1 and prints it
    int factorial = 1;
    int n = threadIdx.x+1;
    for(int i = 1; i <= n; ++i)
    {
        factorial *= i;
    }
    printf("%d!=%d\n", n, factorial);
}

int main()
{
    //invoke GPU kernel, with one block that has eight threads
    factorialKernel<<<1,8>>>();
    cudaDeviceSynchronize();

    //bring the result back from the GPU into the hostArray
    // cudaMemcpy(&hostArray, devArray, sizeof(int) * numElems, cudaMemcpyDeviceToHost);

    // print out the result to confirm that things are looking good
    //std::printf("here\n");

    //release the memory allocated on the GPU
    //cudaFree(devArray);
    return 0;
}
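The factorial pair shows the launch-syntax rewrite in isolation: factorialKernel<<<1,8>>>() becomes hipLaunchKernelGGL((factorialKernel), dim3(1), dim3(8), 0, 0, ), where the two literal zeros are the dynamic shared-memory size and the stream that the triple-chevron form leaves implicit. A small CUDA sketch that spells out those defaults explicitly (the kernel body here is a stand-in, not the dataset's):

#include <cuda_runtime.h>
#include <cstdio>

// Stand-in kernel; not the dataset's factorialKernel.
__global__ void helloKernel() {
    printf("hello from thread %d\n", threadIdx.x);
}

int main() {
    // <<<grid, block, sharedMemBytes, stream>>>: the last two default to 0,
    // which is exactly what hipLaunchKernelGGL spells out as ", 0, 0,".
    helloKernel<<<dim3(1), dim3(8), 0, 0>>>();
    cudaDeviceSynchronize();
    return 0;
}

As a side note, the int accumulator in the dataset's kernel overflows once n reaches 13, so the single block of eight threads stays well within range.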
2a58d91898c5dd5ce92c39434b1da94bd39a0c0b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2017-2018 by Contributors */ #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <memory> #include "xgboost/parameter.h" #include "xgboost/data.h" #include "xgboost/predictor.h" #include "xgboost/tree_model.h" #include "xgboost/tree_updater.h" #include "xgboost/host_device_vector.h" #include "../gbm/gbtree_model.h" #include "../common/common.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace predictor { DMLC_REGISTRY_FILE_TAG(gpu_predictor); /** * \struct DevicePredictionNode * * \brief Packed 16 byte representation of a tree node for use in device * prediction */ struct DevicePredictionNode { XGBOOST_DEVICE DevicePredictionNode() : fidx{-1}, left_child_idx{-1}, right_child_idx{-1} {} union NodeValue { float leaf_weight; float fvalue; }; int fidx; int left_child_idx; int right_child_idx; NodeValue val{}; DevicePredictionNode(const RegTree::Node& n) { // NOLINT static_assert(sizeof(DevicePredictionNode) == 16, "Size is not 16 bytes"); this->left_child_idx = n.LeftChild(); this->right_child_idx = n.RightChild(); this->fidx = n.SplitIndex(); if (n.DefaultLeft()) { fidx |= (1U << 31); } if (n.IsLeaf()) { this->val.leaf_weight = n.LeafValue(); } else { this->val.fvalue = n.SplitCond(); } } XGBOOST_DEVICE bool IsLeaf() const { return left_child_idx == -1; } XGBOOST_DEVICE int GetFidx() const { return fidx & ((1U << 31) - 1U); } XGBOOST_DEVICE bool MissingLeft() const { return (fidx >> 31) != 0; } XGBOOST_DEVICE int MissingIdx() const { if (MissingLeft()) { return this->left_child_idx; } else { return this->right_child_idx; } } XGBOOST_DEVICE float GetFvalue() const { return val.fvalue; } XGBOOST_DEVICE float GetWeight() const { return val.leaf_weight; } }; struct ElementLoader { bool use_shared; common::Span<const bst_row_t> d_row_ptr; common::Span<const Entry> d_data; int num_features; float* smem; size_t entry_start; __device__ ElementLoader(bool use_shared, common::Span<const bst_row_t> row_ptr, common::Span<const Entry> entry, int num_features, float* smem, int num_rows, size_t entry_start) : use_shared(use_shared), d_row_ptr(row_ptr), d_data(entry), num_features(num_features), smem(smem), entry_start(entry_start) { // Copy instances if (use_shared) { bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; int shared_elements = blockDim.x * num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { bst_uint elem_begin = d_row_ptr[global_idx]; bst_uint elem_end = d_row_ptr[global_idx + 1]; for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) { Entry elem = d_data[elem_idx - entry_start]; smem[threadIdx.x * num_features + elem.index] = elem.fvalue; } } __syncthreads(); } } __device__ float GetFvalue(int ridx, int fidx) { if (use_shared) { return smem[threadIdx.x * num_features + fidx]; } else { // Binary search auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start); auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start); common::Span<const Entry>::iterator previous_middle; while (end_ptr != begin_ptr) { auto middle = begin_ptr + (end_ptr - begin_ptr) / 2; if (middle == previous_middle) { break; } else { previous_middle = middle; } if (middle->index == fidx) { return middle->fvalue; } else if (middle->index < fidx) { begin_ptr = middle; } else { end_ptr = middle; } } // Value is missing return nanf(""); } } 
}; __device__ float GetLeafWeight(bst_uint ridx, const DevicePredictionNode* tree, ElementLoader* loader) { DevicePredictionNode n = tree[0]; while (!n.IsLeaf()) { float fvalue = loader->GetFvalue(ridx, n.GetFidx()); // Missing value if (isnan(fvalue)) { n = tree[n.MissingIdx()]; } else { if (fvalue < n.GetFvalue()) { n = tree[n.left_child_idx]; } else { n = tree[n.right_child_idx]; } } } return n.GetWeight(); } template <int BLOCK_THREADS> __global__ void PredictKernel(common::Span<const DevicePredictionNode> d_nodes, common::Span<float> d_out_predictions, common::Span<size_t> d_tree_segments, common::Span<int> d_tree_group, common::Span<const bst_row_t> d_row_ptr, common::Span<const Entry> d_data, size_t tree_begin, size_t tree_end, size_t num_features, size_t num_rows, size_t entry_start, bool use_shared, int num_group) { extern __shared__ float smem[]; bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; ElementLoader loader(use_shared, d_row_ptr, d_data, num_features, smem, num_rows, entry_start); if (global_idx >= num_rows) return; if (num_group == 1) { float sum = 0; for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { const DevicePredictionNode* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; sum += GetLeafWeight(global_idx, d_tree, &loader); } d_out_predictions[global_idx] += sum; } else { for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { int tree_group = d_tree_group[tree_idx]; const DevicePredictionNode* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; bst_uint out_prediction_idx = global_idx * num_group + tree_group; d_out_predictions[out_prediction_idx] += GetLeafWeight(global_idx, d_tree, &loader); } } } class GPUPredictor : public xgboost::Predictor { private: void InitModel(const gbm::GBTreeModel& model, const thrust::host_vector<size_t>& h_tree_segments, const thrust::host_vector<DevicePredictionNode>& h_nodes, size_t tree_begin, size_t tree_end) { dh::safe_cuda(hipSetDevice(device_)); nodes_.resize(h_nodes.size()); dh::safe_cuda(hipMemcpyAsync(nodes_.data().get(), h_nodes.data(), sizeof(DevicePredictionNode) * h_nodes.size(), hipMemcpyHostToDevice)); tree_segments_.resize(h_tree_segments.size()); dh::safe_cuda(hipMemcpyAsync(tree_segments_.data().get(), h_tree_segments.data(), sizeof(size_t) * h_tree_segments.size(), hipMemcpyHostToDevice)); tree_group_.resize(model.tree_info.size()); dh::safe_cuda(hipMemcpyAsync(tree_group_.data().get(), model.tree_info.data(), sizeof(int) * model.tree_info.size(), hipMemcpyHostToDevice)); this->tree_begin_ = tree_begin; this->tree_end_ = tree_end; this->num_group_ = model.param.num_output_group; } void PredictInternal(const SparsePage& batch, size_t num_features, HostDeviceVector<bst_float>* predictions, size_t batch_offset) { dh::safe_cuda(hipSetDevice(device_)); const uint32_t BLOCK_THREADS = 128; size_t num_rows = batch.Size(); auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS)); auto shared_memory_bytes = static_cast<size_t>(sizeof(float) * num_features * BLOCK_THREADS); bool use_shared = true; if (shared_memory_bytes > max_shared_memory_bytes_) { shared_memory_bytes = 0; use_shared = false; } size_t entry_start = 0; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} ( PredictKernel<BLOCK_THREADS>, dh::ToSpan(nodes_), predictions->DeviceSpan().subspan(batch_offset), dh::ToSpan(tree_segments_), dh::ToSpan(tree_group_), batch.offset.DeviceSpan(), batch.data.DeviceSpan(), this->tree_begin_, this->tree_end_, num_features, num_rows, 
entry_start, use_shared, this->num_group_); } void InitModel(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) { CHECK_EQ(model.param.size_leaf_vector, 0); // Copy decision trees to device thrust::host_vector<size_t> h_tree_segments{}; h_tree_segments.reserve((tree_end - tree_begin) + 1); size_t sum = 0; h_tree_segments.push_back(sum); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { sum += model.trees.at(tree_idx)->GetNodes().size(); h_tree_segments.push_back(sum); } thrust::host_vector<DevicePredictionNode> h_nodes(h_tree_segments.back()); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { auto& src_nodes = model.trees.at(tree_idx)->GetNodes(); std::copy(src_nodes.begin(), src_nodes.end(), h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]); } InitModel(model, h_tree_segments, h_nodes, tree_begin, tree_end); } void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) { if (tree_end - tree_begin == 0) { return; } monitor_.StartCuda("DevicePredictInternal"); InitModel(model, tree_begin, tree_end); size_t batch_offset = 0; for (auto &batch : dmat->GetBatches<SparsePage>()) { batch.offset.SetDevice(device_); batch.data.SetDevice(device_); PredictInternal(batch, model.param.num_feature, out_preds, batch_offset); batch_offset += batch.Size() * model.param.num_output_group; } monitor_.StopCuda("DevicePredictInternal"); } public: GPUPredictor() : device_{-1} {} ~GPUPredictor() override { if (device_ >= 0) { dh::safe_cuda(hipSetDevice(device_)); } } void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model, int tree_begin, unsigned ntree_limit = 0) override { int device = learner_param_->gpu_id; CHECK_GE(device, 0); ConfigureDevice(device); if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) { return; } this->InitOutPredictions(dmat->Info(), out_preds, model); int tree_end = ntree_limit * model.param.num_output_group; if (ntree_limit == 0 || ntree_limit > model.trees.size()) { tree_end = static_cast<unsigned>(model.trees.size()); } DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end); } protected: void InitOutPredictions(const MetaInfo& info, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model) const { size_t n_classes = model.param.num_output_group; size_t n = n_classes * info.num_row_; const HostDeviceVector<bst_float>& base_margin = info.base_margin_; out_preds->SetDevice(device_); out_preds->Resize(n); if (base_margin.Size() != 0) { CHECK_EQ(base_margin.Size(), n); out_preds->Copy(base_margin); } else { out_preds->Fill(model.base_margin); } } bool PredictFromCache(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) { if (ntree_limit == 0 || ntree_limit * model.param.num_output_group >= model.trees.size()) { auto it = cache_.find(dmat); if (it != cache_.end()) { const HostDeviceVector<bst_float>& y = it->second.predictions; if (y.Size() != 0) { monitor_.StartCuda("PredictFromCache"); out_preds->SetDevice(y.DeviceIdx()); out_preds->Resize(y.Size()); out_preds->Copy(y); monitor_.StopCuda("PredictFromCache"); return true; } } } return false; } void UpdatePredictionCache( const gbm::GBTreeModel& model, std::vector<std::unique_ptr<TreeUpdater>>* updaters, int num_new_trees) override { auto old_ntree = model.trees.size() - num_new_trees; // update cache entry for (auto& kv : cache_) { PredictionCacheEntry& e 
= kv.second; DMatrix* dmat = kv.first; HostDeviceVector<bst_float>& predictions = e.predictions; if (predictions.Size() == 0) { this->InitOutPredictions(dmat->Info(), &predictions, model); } if (model.param.num_output_group == 1 && updaters->size() > 0 && num_new_trees == 1 && updaters->back()->UpdatePredictionCache(e.data.get(), &predictions)) { // do nothing } else { DevicePredictInternal(dmat, &predictions, model, old_ntree, model.trees.size()); } } } void PredictInstance(const SparsePage::Inst& inst, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "Internal error: " << __func__ << " is not implemented in GPU Predictor."; } void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "Internal error: " << __func__ << " is not implemented in GPU Predictor."; } void PredictContribution(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, std::vector<bst_float>* tree_weights, bool approximate, int condition, unsigned condition_feature) override { LOG(FATAL) << "Internal error: " << __func__ << " is not implemented in GPU Predictor."; } void PredictInteractionContributions(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, std::vector<bst_float>* tree_weights, bool approximate) override { LOG(FATAL) << "Internal error: " << __func__ << " is not implemented in GPU Predictor."; } void Configure(const std::vector<std::pair<std::string, std::string>>& cfg, const std::vector<std::shared_ptr<DMatrix>>& cache) override { Predictor::Configure(cfg, cache); int device = learner_param_->gpu_id; if (device >= 0) { ConfigureDevice(device); } } private: /*! \brief Reconfigure the device when GPU is changed. */ void ConfigureDevice(int device) { if (device_ == device) return; device_ = device; if (device_ >= 0) { max_shared_memory_bytes_ = dh::MaxSharedMemory(device_); } } int device_; common::Monitor monitor_; dh::device_vector<DevicePredictionNode> nodes_; dh::device_vector<size_t> tree_segments_; dh::device_vector<int> tree_group_; size_t max_shared_memory_bytes_; size_t tree_begin_; size_t tree_end_; int num_group_; }; XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor") .describe("Make predictions using GPU.") .set_body([]() { return new GPUPredictor(); }); } // namespace predictor } // namespace xgboost
2a58d91898c5dd5ce92c39434b1da94bd39a0c0b.cu
/*! * Copyright 2017-2018 by Contributors */ #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <memory> #include "xgboost/parameter.h" #include "xgboost/data.h" #include "xgboost/predictor.h" #include "xgboost/tree_model.h" #include "xgboost/tree_updater.h" #include "xgboost/host_device_vector.h" #include "../gbm/gbtree_model.h" #include "../common/common.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace predictor { DMLC_REGISTRY_FILE_TAG(gpu_predictor); /** * \struct DevicePredictionNode * * \brief Packed 16 byte representation of a tree node for use in device * prediction */ struct DevicePredictionNode { XGBOOST_DEVICE DevicePredictionNode() : fidx{-1}, left_child_idx{-1}, right_child_idx{-1} {} union NodeValue { float leaf_weight; float fvalue; }; int fidx; int left_child_idx; int right_child_idx; NodeValue val{}; DevicePredictionNode(const RegTree::Node& n) { // NOLINT static_assert(sizeof(DevicePredictionNode) == 16, "Size is not 16 bytes"); this->left_child_idx = n.LeftChild(); this->right_child_idx = n.RightChild(); this->fidx = n.SplitIndex(); if (n.DefaultLeft()) { fidx |= (1U << 31); } if (n.IsLeaf()) { this->val.leaf_weight = n.LeafValue(); } else { this->val.fvalue = n.SplitCond(); } } XGBOOST_DEVICE bool IsLeaf() const { return left_child_idx == -1; } XGBOOST_DEVICE int GetFidx() const { return fidx & ((1U << 31) - 1U); } XGBOOST_DEVICE bool MissingLeft() const { return (fidx >> 31) != 0; } XGBOOST_DEVICE int MissingIdx() const { if (MissingLeft()) { return this->left_child_idx; } else { return this->right_child_idx; } } XGBOOST_DEVICE float GetFvalue() const { return val.fvalue; } XGBOOST_DEVICE float GetWeight() const { return val.leaf_weight; } }; struct ElementLoader { bool use_shared; common::Span<const bst_row_t> d_row_ptr; common::Span<const Entry> d_data; int num_features; float* smem; size_t entry_start; __device__ ElementLoader(bool use_shared, common::Span<const bst_row_t> row_ptr, common::Span<const Entry> entry, int num_features, float* smem, int num_rows, size_t entry_start) : use_shared(use_shared), d_row_ptr(row_ptr), d_data(entry), num_features(num_features), smem(smem), entry_start(entry_start) { // Copy instances if (use_shared) { bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; int shared_elements = blockDim.x * num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { bst_uint elem_begin = d_row_ptr[global_idx]; bst_uint elem_end = d_row_ptr[global_idx + 1]; for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) { Entry elem = d_data[elem_idx - entry_start]; smem[threadIdx.x * num_features + elem.index] = elem.fvalue; } } __syncthreads(); } } __device__ float GetFvalue(int ridx, int fidx) { if (use_shared) { return smem[threadIdx.x * num_features + fidx]; } else { // Binary search auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start); auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start); common::Span<const Entry>::iterator previous_middle; while (end_ptr != begin_ptr) { auto middle = begin_ptr + (end_ptr - begin_ptr) / 2; if (middle == previous_middle) { break; } else { previous_middle = middle; } if (middle->index == fidx) { return middle->fvalue; } else if (middle->index < fidx) { begin_ptr = middle; } else { end_ptr = middle; } } // Value is missing return nanf(""); } } }; __device__ float GetLeafWeight(bst_uint ridx, const DevicePredictionNode* tree, 
ElementLoader* loader) { DevicePredictionNode n = tree[0]; while (!n.IsLeaf()) { float fvalue = loader->GetFvalue(ridx, n.GetFidx()); // Missing value if (isnan(fvalue)) { n = tree[n.MissingIdx()]; } else { if (fvalue < n.GetFvalue()) { n = tree[n.left_child_idx]; } else { n = tree[n.right_child_idx]; } } } return n.GetWeight(); } template <int BLOCK_THREADS> __global__ void PredictKernel(common::Span<const DevicePredictionNode> d_nodes, common::Span<float> d_out_predictions, common::Span<size_t> d_tree_segments, common::Span<int> d_tree_group, common::Span<const bst_row_t> d_row_ptr, common::Span<const Entry> d_data, size_t tree_begin, size_t tree_end, size_t num_features, size_t num_rows, size_t entry_start, bool use_shared, int num_group) { extern __shared__ float smem[]; bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; ElementLoader loader(use_shared, d_row_ptr, d_data, num_features, smem, num_rows, entry_start); if (global_idx >= num_rows) return; if (num_group == 1) { float sum = 0; for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { const DevicePredictionNode* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; sum += GetLeafWeight(global_idx, d_tree, &loader); } d_out_predictions[global_idx] += sum; } else { for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { int tree_group = d_tree_group[tree_idx]; const DevicePredictionNode* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; bst_uint out_prediction_idx = global_idx * num_group + tree_group; d_out_predictions[out_prediction_idx] += GetLeafWeight(global_idx, d_tree, &loader); } } } class GPUPredictor : public xgboost::Predictor { private: void InitModel(const gbm::GBTreeModel& model, const thrust::host_vector<size_t>& h_tree_segments, const thrust::host_vector<DevicePredictionNode>& h_nodes, size_t tree_begin, size_t tree_end) { dh::safe_cuda(cudaSetDevice(device_)); nodes_.resize(h_nodes.size()); dh::safe_cuda(cudaMemcpyAsync(nodes_.data().get(), h_nodes.data(), sizeof(DevicePredictionNode) * h_nodes.size(), cudaMemcpyHostToDevice)); tree_segments_.resize(h_tree_segments.size()); dh::safe_cuda(cudaMemcpyAsync(tree_segments_.data().get(), h_tree_segments.data(), sizeof(size_t) * h_tree_segments.size(), cudaMemcpyHostToDevice)); tree_group_.resize(model.tree_info.size()); dh::safe_cuda(cudaMemcpyAsync(tree_group_.data().get(), model.tree_info.data(), sizeof(int) * model.tree_info.size(), cudaMemcpyHostToDevice)); this->tree_begin_ = tree_begin; this->tree_end_ = tree_end; this->num_group_ = model.param.num_output_group; } void PredictInternal(const SparsePage& batch, size_t num_features, HostDeviceVector<bst_float>* predictions, size_t batch_offset) { dh::safe_cuda(cudaSetDevice(device_)); const uint32_t BLOCK_THREADS = 128; size_t num_rows = batch.Size(); auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS)); auto shared_memory_bytes = static_cast<size_t>(sizeof(float) * num_features * BLOCK_THREADS); bool use_shared = true; if (shared_memory_bytes > max_shared_memory_bytes_) { shared_memory_bytes = 0; use_shared = false; } size_t entry_start = 0; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} ( PredictKernel<BLOCK_THREADS>, dh::ToSpan(nodes_), predictions->DeviceSpan().subspan(batch_offset), dh::ToSpan(tree_segments_), dh::ToSpan(tree_group_), batch.offset.DeviceSpan(), batch.data.DeviceSpan(), this->tree_begin_, this->tree_end_, num_features, num_rows, entry_start, use_shared, this->num_group_); } void InitModel(const 
gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) { CHECK_EQ(model.param.size_leaf_vector, 0); // Copy decision trees to device thrust::host_vector<size_t> h_tree_segments{}; h_tree_segments.reserve((tree_end - tree_begin) + 1); size_t sum = 0; h_tree_segments.push_back(sum); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { sum += model.trees.at(tree_idx)->GetNodes().size(); h_tree_segments.push_back(sum); } thrust::host_vector<DevicePredictionNode> h_nodes(h_tree_segments.back()); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { auto& src_nodes = model.trees.at(tree_idx)->GetNodes(); std::copy(src_nodes.begin(), src_nodes.end(), h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]); } InitModel(model, h_tree_segments, h_nodes, tree_begin, tree_end); } void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) { if (tree_end - tree_begin == 0) { return; } monitor_.StartCuda("DevicePredictInternal"); InitModel(model, tree_begin, tree_end); size_t batch_offset = 0; for (auto &batch : dmat->GetBatches<SparsePage>()) { batch.offset.SetDevice(device_); batch.data.SetDevice(device_); PredictInternal(batch, model.param.num_feature, out_preds, batch_offset); batch_offset += batch.Size() * model.param.num_output_group; } monitor_.StopCuda("DevicePredictInternal"); } public: GPUPredictor() : device_{-1} {} ~GPUPredictor() override { if (device_ >= 0) { dh::safe_cuda(cudaSetDevice(device_)); } } void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model, int tree_begin, unsigned ntree_limit = 0) override { int device = learner_param_->gpu_id; CHECK_GE(device, 0); ConfigureDevice(device); if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) { return; } this->InitOutPredictions(dmat->Info(), out_preds, model); int tree_end = ntree_limit * model.param.num_output_group; if (ntree_limit == 0 || ntree_limit > model.trees.size()) { tree_end = static_cast<unsigned>(model.trees.size()); } DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end); } protected: void InitOutPredictions(const MetaInfo& info, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model) const { size_t n_classes = model.param.num_output_group; size_t n = n_classes * info.num_row_; const HostDeviceVector<bst_float>& base_margin = info.base_margin_; out_preds->SetDevice(device_); out_preds->Resize(n); if (base_margin.Size() != 0) { CHECK_EQ(base_margin.Size(), n); out_preds->Copy(base_margin); } else { out_preds->Fill(model.base_margin); } } bool PredictFromCache(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) { if (ntree_limit == 0 || ntree_limit * model.param.num_output_group >= model.trees.size()) { auto it = cache_.find(dmat); if (it != cache_.end()) { const HostDeviceVector<bst_float>& y = it->second.predictions; if (y.Size() != 0) { monitor_.StartCuda("PredictFromCache"); out_preds->SetDevice(y.DeviceIdx()); out_preds->Resize(y.Size()); out_preds->Copy(y); monitor_.StopCuda("PredictFromCache"); return true; } } } return false; } void UpdatePredictionCache( const gbm::GBTreeModel& model, std::vector<std::unique_ptr<TreeUpdater>>* updaters, int num_new_trees) override { auto old_ntree = model.trees.size() - num_new_trees; // update cache entry for (auto& kv : cache_) { PredictionCacheEntry& e = kv.second; DMatrix* dmat = kv.first; 
HostDeviceVector<bst_float>& predictions = e.predictions; if (predictions.Size() == 0) { this->InitOutPredictions(dmat->Info(), &predictions, model); } if (model.param.num_output_group == 1 && updaters->size() > 0 && num_new_trees == 1 && updaters->back()->UpdatePredictionCache(e.data.get(), &predictions)) { // do nothing } else { DevicePredictInternal(dmat, &predictions, model, old_ntree, model.trees.size()); } } } void PredictInstance(const SparsePage::Inst& inst, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "Internal error: " << __func__ << " is not implemented in GPU Predictor."; } void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "Internal error: " << __func__ << " is not implemented in GPU Predictor."; } void PredictContribution(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, std::vector<bst_float>* tree_weights, bool approximate, int condition, unsigned condition_feature) override { LOG(FATAL) << "Internal error: " << __func__ << " is not implemented in GPU Predictor."; } void PredictInteractionContributions(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, std::vector<bst_float>* tree_weights, bool approximate) override { LOG(FATAL) << "Internal error: " << __func__ << " is not implemented in GPU Predictor."; } void Configure(const std::vector<std::pair<std::string, std::string>>& cfg, const std::vector<std::shared_ptr<DMatrix>>& cache) override { Predictor::Configure(cfg, cache); int device = learner_param_->gpu_id; if (device >= 0) { ConfigureDevice(device); } } private: /*! \brief Reconfigure the device when GPU is changed. */ void ConfigureDevice(int device) { if (device_ == device) return; device_ = device; if (device_ >= 0) { max_shared_memory_bytes_ = dh::MaxSharedMemory(device_); } } int device_; common::Monitor monitor_; dh::device_vector<DevicePredictionNode> nodes_; dh::device_vector<size_t> tree_segments_; dh::device_vector<int> tree_group_; size_t max_shared_memory_bytes_; size_t tree_begin_; size_t tree_end_; int num_group_; }; XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor") .describe("Make predictions using GPU.") .set_body([]() { return new GPUPredictor(); }); } // namespace predictor } // namespace xgboost
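In the xgboost predictor pair, hipify again touches only the runtime calls (cudaSetDevice to hipSetDevice, cudaMemcpyAsync to hipMemcpyAsync) while the Thrust containers and the packed 16-byte DevicePredictionNode layout compile unchanged. Below is a hedged CUDA sketch of the host-to-device node upload pattern used by GPUPredictor::InitModel; the PackedNode struct is an illustrative stand-in, not the real xgboost type.

#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <cuda_runtime.h>
#include <cstdio>

// Illustrative 16-byte packed node, mirroring the layout idea of
// DevicePredictionNode; this is not the real xgboost struct.
struct PackedNode {
    int fidx;
    int left;
    int right;
    float value;
};

int main() {
    thrust::host_vector<PackedNode> h_nodes(4, PackedNode{-1, -1, -1, 0.0f});
    thrust::device_vector<PackedNode> d_nodes(h_nodes.size());
    // Same call shape as GPUPredictor::InitModel: a raw async copy into the
    // device_vector's storage; hipify rewrites this to hipMemcpyAsync.
    cudaMemcpyAsync(thrust::raw_pointer_cast(d_nodes.data()), h_nodes.data(),
                    sizeof(PackedNode) * h_nodes.size(), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    printf("copied %zu nodes\n", d_nodes.size());
    return 0;
}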
62e8c154c16ab632eb0b2e4bbd993a389baf30fc.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "matrixAdd.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *a = NULL; hipMalloc(&a, XSIZE*YSIZE); int *b = NULL; hipMalloc(&b, XSIZE*YSIZE); int *c = NULL; hipMalloc(&c, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( matrixAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( matrixAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( matrixAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
62e8c154c16ab632eb0b2e4bbd993a389baf30fc.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "matrixAdd.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); int *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); int *c = NULL; cudaMalloc(&c, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); matrixAdd<<<gridBlock,threadBlock>>>(a,b,c); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { matrixAdd<<<gridBlock,threadBlock>>>(a,b,c); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { matrixAdd<<<gridBlock,threadBlock>>>(a,b,c); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
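Both harness files above pull in matrixAdd.cu for the kernel being timed, and that file is not part of this pair, so the kernel body below is only an assumption about what a typical elementwise matrixAdd looks like. It matches the three-pointer launch used by the harness by deriving the row width from the launch configuration. Note also that the harness allocates XSIZE*YSIZE bytes per buffer, so a kernel writing one int per launched thread would need those allocations scaled by sizeof(int) to stay in bounds.

// Hypothetical matrixAdd compatible with the <<<gridBlock, threadBlock>>>(a, b, c) launch above;
// the real matrixAdd.cu may differ.
__global__ void matrixAdd(int *a, int *b, int *c) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = row * (gridDim.x * blockDim.x) + col;  // row-major index over the padded grid
    c[idx] = a[idx] + b[idx];
}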
f9d668093b571175c17355d1970b5f140db4abd5.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <chrono>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"

__global__ void kernal_fp32_OptimizerAdam(
            int const       *size_table,
            float * const   *params_buf_table,
            float * const   *grads_buf_table,
            float * const   *m_buf_table,
            float * const   *v_buf_table,
            float           lr_t,
            float           neg_beta1,
            float           neg_beta2
        )
{
    int id      = threadIdx.x;
    int id_step = blockDim.x;
    int index   = blockDim.y * blockIdx.y + threadIdx.y;

    int   size        = size_table[index];
    float *params_buf = params_buf_table[index];
    float *grads_buf  = grads_buf_table[index];
    float *m_buf      = m_buf_table[index];
    float *v_buf      = v_buf_table[index];

    for ( int n = id; n < size; n += id_step ) {
        float param = params_buf[n];
        float grad  = grads_buf[n];
        float m     = m_buf[n];
        float v     = v_buf[n];
        m += neg_beta1 * (grad - m);
        v += neg_beta2 * (grad * grad - v);
        param -= lr_t * m / (sqrt(v) + 1e-7);
        m_buf[n]      = m;
        v_buf[n]      = v;
        params_buf[n] = param;
        // grads_buf[n] = 0;
    }
}

BBCU_DLL_EXPORT int bbcu_fp32_OptimizerAdam
        (
            int             size,
            int const       *dev_size_table,
            float * const   *dev_params_buf_table,
            float * const   *dev_grads_buf_table,
            float * const   *dev_m_buf_table,
            float * const   *dev_v_buf_table,
            float           lr_t,
            float           beta1,
            float           beta2,
            hipStream_t     streamId
        )
{
    BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());

    dim3 grid(1, size);
    dim3 block(192, 1);

    hipLaunchKernelGGL(( kernal_fp32_OptimizerAdam), dim3(grid), dim3(block), 0, streamId,
            dev_size_table,
            dev_params_buf_table,
            dev_grads_buf_table,
            dev_m_buf_table,
            dev_v_buf_table,
            lr_t,
            (1.0f - beta1),
            (1.0f - beta2)
        );
    BB_CUDA_CHECK_LAST_ERROR();

    return 0;
}

// end of file
f9d668093b571175c17355d1970b5f140db4abd5.cu
#include <iostream>
#include <chrono>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"

__global__ void kernal_fp32_OptimizerAdam(
            int const       *size_table,
            float * const   *params_buf_table,
            float * const   *grads_buf_table,
            float * const   *m_buf_table,
            float * const   *v_buf_table,
            float           lr_t,
            float           neg_beta1,
            float           neg_beta2
        )
{
    int id      = threadIdx.x;
    int id_step = blockDim.x;
    int index   = blockDim.y * blockIdx.y + threadIdx.y;

    int   size        = size_table[index];
    float *params_buf = params_buf_table[index];
    float *grads_buf  = grads_buf_table[index];
    float *m_buf      = m_buf_table[index];
    float *v_buf      = v_buf_table[index];

    for ( int n = id; n < size; n += id_step ) {
        float param = params_buf[n];
        float grad  = grads_buf[n];
        float m     = m_buf[n];
        float v     = v_buf[n];
        m += neg_beta1 * (grad - m);
        v += neg_beta2 * (grad * grad - v);
        param -= lr_t * m / (sqrt(v) + 1e-7);
        m_buf[n]      = m;
        v_buf[n]      = v;
        params_buf[n] = param;
        // grads_buf[n] = 0;
    }
}

BBCU_DLL_EXPORT int bbcu_fp32_OptimizerAdam
        (
            int             size,
            int const       *dev_size_table,
            float * const   *dev_params_buf_table,
            float * const   *dev_grads_buf_table,
            float * const   *dev_m_buf_table,
            float * const   *dev_v_buf_table,
            float           lr_t,
            float           beta1,
            float           beta2,
            cudaStream_t    streamId
        )
{
    BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());

    dim3 grid(1, size);
    dim3 block(192, 1);

    kernal_fp32_OptimizerAdam<<<grid, block, 0, streamId>>>(
            dev_size_table,
            dev_params_buf_table,
            dev_grads_buf_table,
            dev_m_buf_table,
            dev_v_buf_table,
            lr_t,
            (1.0f - beta1),
            (1.0f - beta2)
        );
    BB_CUDA_CHECK_LAST_ERROR();

    return 0;
}

// end of file
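The kernel above applies param -= lr_t * m / (sqrt(v) + 1e-7) with biased moment estimates, so any Adam bias correction has to be folded into lr_t by the caller, which is not shown in this file. The host-side sketch below assumes lr_t carries the standard correction sqrt(1 - beta2^t) / (1 - beta1^t); the helper name is hypothetical.

#include <math.h>

// Sketch only: bias-corrected learning rate to pass as lr_t to bbcu_fp32_OptimizerAdam above.
// iteration is the 1-based Adam step count.
static float bbcu_adam_lr_t(float learning_rate, float beta1, float beta2, int iteration)
{
    float t = (float)iteration;
    return learning_rate * sqrtf(1.0f - powf(beta2, t)) / (1.0f - powf(beta1, t));
}

With beta1 = 0.9, beta2 = 0.999 and iteration = 1 this yields roughly 0.316 * learning_rate, and it converges toward learning_rate as the step count grows.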
c25ef00533727ecafb5b0284cf214d27188509f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <assert.h> // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. 
inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } #endif return result; } template <typename T> __global__ void offset(T* a, int s) { int i = blockDim.x * blockIdx.x + threadIdx.x + s; a[i] = a[i] + 1; } template <typename T> __global__ void stride(T* a, int s) { int i = (blockDim.x * blockIdx.x + threadIdx.x) * s; a[i] = a[i] + 1; } template <typename T> void runTest(int deviceId, int nMB) { int blockSize = 256; float ms; T *d_a; hipEvent_t startEvent, stopEvent; int n = nMB*1024*1024/sizeof(T); // NB: d_a(33*nMB) for stride case checkCuda( hipMalloc(&d_a, n * 33 * sizeof(T)) ); checkCuda( hipEventCreate(&startEvent) ); checkCuda( hipEventCreate(&stopEvent) ); printf("Offset, Bandwidth (GB/s):\n"); hipLaunchKernelGGL(( offset), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, 0); // warm up for (int i = 0; i <= 32; i++) { checkCuda( hipMemset(d_a, 0, n * sizeof(T)) ); checkCuda( hipEventRecord(startEvent,0) ); hipLaunchKernelGGL(( offset), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, i); checkCuda( hipEventRecord(stopEvent,0) ); checkCuda( hipEventSynchronize(stopEvent) ); checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) ); printf("%d, %f, %f\n", i, 2*nMB/ms, ms); } printf("\n"); printf("Stride, Bandwidth (GB/s):\n"); hipLaunchKernelGGL(( stride), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, 1); // warm up for (int i = 1; i <= 32; i++) { checkCuda( hipMemset(d_a, 0, n * sizeof(T)) ); checkCuda( hipEventRecord(startEvent,0) ); hipLaunchKernelGGL(( stride), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, i); checkCuda( hipEventRecord(stopEvent,0) ); checkCuda( hipEventSynchronize(stopEvent) ); checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) ); printf("%d, %f, %f\n", i, 2*nMB/ms, ms); } checkCuda( hipEventDestroy(startEvent) ); checkCuda( hipEventDestroy(stopEvent) ); hipFree(d_a); } int main(int argc, char **argv) { int nMB = 4; int deviceId = 0; bool bFp64 = false; for (int i = 1; i < argc; i++) { if (!strncmp(argv[i], "dev=", 4)) deviceId = atoi((char*)(&argv[i][4])); else if (!strcmp(argv[i], "fp64")) bFp64 = true; } hipDeviceProp_t prop; checkCuda( hipSetDevice(deviceId) ) ; checkCuda( hipGetDeviceProperties(&prop, deviceId) ); printf("Device: %s\n", prop.name); printf("Transfer size (MB): %d\n", nMB); printf("%s Precision\n", bFp64 ? "Double" : "Single"); if (bFp64) runTest<double>(deviceId, nMB); else runTest<float>(deviceId, nMB); }
c25ef00533727ecafb5b0284cf214d27188509f0.cu
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <assert.h> // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } #endif return result; } template <typename T> __global__ void offset(T* a, int s) { int i = blockDim.x * blockIdx.x + threadIdx.x + s; a[i] = a[i] + 1; } template <typename T> __global__ void stride(T* a, int s) { int i = (blockDim.x * blockIdx.x + threadIdx.x) * s; a[i] = a[i] + 1; } template <typename T> void runTest(int deviceId, int nMB) { int blockSize = 256; float ms; T *d_a; cudaEvent_t startEvent, stopEvent; int n = nMB*1024*1024/sizeof(T); // NB: d_a(33*nMB) for stride case checkCuda( cudaMalloc(&d_a, n * 33 * sizeof(T)) ); checkCuda( cudaEventCreate(&startEvent) ); checkCuda( cudaEventCreate(&stopEvent) ); printf("Offset, Bandwidth (GB/s):\n"); offset<<<n/blockSize, blockSize>>>(d_a, 0); // warm up for (int i = 0; i <= 32; i++) { checkCuda( cudaMemset(d_a, 0, n * sizeof(T)) ); checkCuda( cudaEventRecord(startEvent,0) ); offset<<<n/blockSize, blockSize>>>(d_a, i); checkCuda( cudaEventRecord(stopEvent,0) ); checkCuda( cudaEventSynchronize(stopEvent) ); checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) ); printf("%d, %f, %f\n", i, 2*nMB/ms, ms); } printf("\n"); printf("Stride, Bandwidth (GB/s):\n"); stride<<<n/blockSize, blockSize>>>(d_a, 1); // warm up for (int i = 1; i <= 32; i++) { checkCuda( cudaMemset(d_a, 0, n * sizeof(T)) ); checkCuda( cudaEventRecord(startEvent,0) ); stride<<<n/blockSize, blockSize>>>(d_a, i); checkCuda( cudaEventRecord(stopEvent,0) ); checkCuda( cudaEventSynchronize(stopEvent) ); checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) ); printf("%d, %f, %f\n", i, 2*nMB/ms, ms); } checkCuda( cudaEventDestroy(startEvent) ); checkCuda( cudaEventDestroy(stopEvent) 
); cudaFree(d_a); } int main(int argc, char **argv) { int nMB = 4; int deviceId = 0; bool bFp64 = false; for (int i = 1; i < argc; i++) { if (!strncmp(argv[i], "dev=", 4)) deviceId = atoi((char*)(&argv[i][4])); else if (!strcmp(argv[i], "fp64")) bFp64 = true; } cudaDeviceProp prop; checkCuda( cudaSetDevice(deviceId) ) ; checkCuda( cudaGetDeviceProperties(&prop, deviceId) ); printf("Device: %s\n", prop.name); printf("Transfer size (MB): %d\n", nMB); printf("%s Precision\n", bFp64 ? "Double" : "Single"); if (bFp64) runTest<double>(deviceId, nMB); else runTest<float>(deviceId, nMB); }
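The bandwidth column printed by both versions above is 2*nMB/ms: each launch reads and writes n elements, i.e. nMB megabytes in each direction, and megabytes per millisecond is numerically gigabytes per second (taking 1 GB = 1000 MB). The helper below is purely illustrative and not part of the original.

// Effective (requested) bandwidth as reported above: 2 * nMB megabytes moved in ms milliseconds.
// For the strided kernel the same amount of data is requested, so the reported figure drops
// because cache lines are used less efficiently, not because less data is asked for.
static inline float effectiveBandwidthGBs(int nMB, float ms) {
    return 2.0f * nMB / ms;
}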
b2dce6e8b89ded378762c6d53de34a7ec2ce06c6.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish<
        float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, float, LayoutDst, float,
        LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 4, 16, false, cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);
#pragma GCC diagnostic pop
#endif
b2dce6e8b89ded378762c6d53de34a7ec2ce06c6.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish<
        float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, float, LayoutDst, float,
        LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 4, 16, false, cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
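The instantiation above fixes the activation layout to cutlass::layout::TensorNCxHWx<4>, i.e. an NCHW4-interleaved int8 tensor. The index function below is only an assumption written out to make that template parameter concrete; the authoritative definition lives in the CUTLASS/MegDNN headers, and C is assumed to be a multiple of 4.

// Hypothetical linearization for an NCHW4-interleaved tensor: channels are packed in blocks
// of four, with the block index outside the spatial dims and the in-block channel innermost.
static inline size_t nchw4_offset(size_t n, size_t c, size_t h, size_t w,
                                  size_t C, size_t H, size_t W) {
    return (((n * (C / 4) + c / 4) * H + h) * W + w) * 4 + (c % 4);
}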
bbbd2e320c73d4b3ed0bbcac22fd6c1bdd6ffbec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "optkit_defs_gpu.h" #include "optkit_upsampling_vector.h" inline __device__ ok_float& __get(ok_float * data, uint i, uint j, const uint stride_row, const uint stride_col) { return data[i * stride_row + j * stride_col]; } #ifdef __cplusplus extern "C" { #endif ok_status upsamplingvec_mul_matrix(void * linalg_handle, const enum CBLAS_TRANSPOSE transU, const enum CBLAS_TRANSPOSE transI, const enum CBLAS_TRANSPOSE transO, const ok_float alpha, upsamplingvec * u, matrix * M_in, ok_float beta, matrix * M_out) { OK_CHECK_UPSAMPLINGVEC(u); OK_CHECK_MATRIX(M_in); OK_CHECK_MATRIX(M_out); ok_status err = OPTKIT_SUCCESS; size_t i, dim_in1, dim_in2, dim_out1, dim_out2; size_t ptr_stride_in, ptr_stride_out; int stride_in, stride_out; const int transpose = transU == CblasTrans; if ((!u || !M_in || !M_out) || (!u->indices || !M_in->data ||!M_out->data)) return OPTKIT_ERROR_UNALLOCATED; dim_in1 = (transI == CblasNoTrans) ? M_in->size1 : M_in->size2; dim_in2 = (transI == CblasNoTrans) ? M_in->size2 : M_in->size1; dim_out1 = (transO == CblasNoTrans) ? M_out->size1 : M_out->size2; dim_out2 = (transO == CblasNoTrans) ? M_out->size2 : M_out->size1; if (!upsampling_dims_compatible(transpose, u, dim_in1, dim_in2, dim_out1, dim_out2)) return OK_SCAN_ERR( OPTKIT_ERROR_DIMENSION_MISMATCH ); stride_in = ((transI == CblasNoTrans) == (M_in->order == CblasRowMajor)) ? 1 : (int) M_in->ld; stride_out = ((transO == CblasNoTrans) == (M_out->order == CblasRowMajor)) ? 1 : (int) M_out->ld; ptr_stride_in = (stride_in == 1) ? M_in->ld : 1; ptr_stride_out = (stride_out == 1) ? M_out->ld : 1; OK_RETURNIF_ERR( matrix_scale(M_out, beta) ); if (!transpose) for (i = 0; i < dim_out1 && !err; ++i) { size_t row_; hipStream_t s; OK_CHECK_CUDA( err, hipStreamCreate(&s) ); OK_CHECK_CUDA( err, hipMemcpyAsync(&row_, u->indices + i * u->stride, sizeof(row_), hipMemcpyDeviceToHost, s) ); OK_CHECK_CUBLAS( err, hipblasSetStream( *(hipblasHandle_t *) linalg_handle, s) ); OK_CHECK_CUBLAS( err, CUBLAS(axpy)(*(hipblasHandle_t *) linalg_handle, dim_in2, &alpha, M_in->data + row_ * ptr_stride_in, stride_in, M_out->data + i * ptr_stride_out, stride_out) ); OK_CHECK_CUDA( err, hipStreamDestroy(s) ); } else for (i = 0; i < dim_in1; ++i) { size_t row_; hipStream_t s; OK_CHECK_CUDA( err, hipStreamCreate(&s) ); OK_CHECK_CUDA( err, hipMemcpyAsync(&row_, u->indices + i * u->stride, sizeof(row_), hipMemcpyDeviceToHost, s) ); OK_CHECK_CUBLAS( err, hipblasSetStream( *(hipblasHandle_t *) linalg_handle, s) ); OK_CHECK_CUBLAS( err, CUBLAS(axpy)(*(hipblasHandle_t *) linalg_handle, dim_in2, &alpha, M_in->data + i * ptr_stride_in, stride_in, M_out->data + row_ * ptr_stride_out, stride_out) ); OK_CHECK_CUDA( err, hipStreamDestroy(s) ); } hipDeviceSynchronize(); err = OK_STATUS_CUDA; /* restore CUDA BLAS context to the default stream */ OK_MAX_ERR( err, OK_SCAN_CUBLAS( hipblasSetStream( *(hipblasHandle_t *) linalg_handle, NULL) )); return err ? 
err : OK_STATUS_CUDA; } static __global__ void __upsampling_count(size_t * indices, ok_float * counts, size_t stride_idx, size_t stride_cts, size_t size) { size_t i; for (i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) counts[indices[i * stride_idx] * stride_cts] += kOne; } ok_status upsamplingvec_count(const upsamplingvec * u, vector * counts) { if ((!u || !counts) || (!u->indices || !counts->data)) return OK_SCAN_ERR( OPTKIT_ERROR_UNALLOCATED ); uint grid_dim = calc_grid_dim(u->size1); if (u->size2 > counts->size) return OK_SCAN_ERR( OPTKIT_ERROR_DIMENSION_MISMATCH ); OK_RETURNIF_ERR( vector_scale(counts, kZero) ); hipLaunchKernelGGL(( __upsampling_count), dim3(grid_dim), dim3(kBlockSize), 0, 0, u->indices, counts->data, u->stride, counts->stride, u->size1); hipDeviceSynchronize(); return OK_STATUS_CUDA; } #ifdef __cplusplus } #endif
bbbd2e320c73d4b3ed0bbcac22fd6c1bdd6ffbec.cu
#include "optkit_defs_gpu.h" #include "optkit_upsampling_vector.h" inline __device__ ok_float& __get(ok_float * data, uint i, uint j, const uint stride_row, const uint stride_col) { return data[i * stride_row + j * stride_col]; } #ifdef __cplusplus extern "C" { #endif ok_status upsamplingvec_mul_matrix(void * linalg_handle, const enum CBLAS_TRANSPOSE transU, const enum CBLAS_TRANSPOSE transI, const enum CBLAS_TRANSPOSE transO, const ok_float alpha, upsamplingvec * u, matrix * M_in, ok_float beta, matrix * M_out) { OK_CHECK_UPSAMPLINGVEC(u); OK_CHECK_MATRIX(M_in); OK_CHECK_MATRIX(M_out); ok_status err = OPTKIT_SUCCESS; size_t i, dim_in1, dim_in2, dim_out1, dim_out2; size_t ptr_stride_in, ptr_stride_out; int stride_in, stride_out; const int transpose = transU == CblasTrans; if ((!u || !M_in || !M_out) || (!u->indices || !M_in->data ||!M_out->data)) return OPTKIT_ERROR_UNALLOCATED; dim_in1 = (transI == CblasNoTrans) ? M_in->size1 : M_in->size2; dim_in2 = (transI == CblasNoTrans) ? M_in->size2 : M_in->size1; dim_out1 = (transO == CblasNoTrans) ? M_out->size1 : M_out->size2; dim_out2 = (transO == CblasNoTrans) ? M_out->size2 : M_out->size1; if (!upsampling_dims_compatible(transpose, u, dim_in1, dim_in2, dim_out1, dim_out2)) return OK_SCAN_ERR( OPTKIT_ERROR_DIMENSION_MISMATCH ); stride_in = ((transI == CblasNoTrans) == (M_in->order == CblasRowMajor)) ? 1 : (int) M_in->ld; stride_out = ((transO == CblasNoTrans) == (M_out->order == CblasRowMajor)) ? 1 : (int) M_out->ld; ptr_stride_in = (stride_in == 1) ? M_in->ld : 1; ptr_stride_out = (stride_out == 1) ? M_out->ld : 1; OK_RETURNIF_ERR( matrix_scale(M_out, beta) ); if (!transpose) for (i = 0; i < dim_out1 && !err; ++i) { size_t row_; cudaStream_t s; OK_CHECK_CUDA( err, cudaStreamCreate(&s) ); OK_CHECK_CUDA( err, cudaMemcpyAsync(&row_, u->indices + i * u->stride, sizeof(row_), cudaMemcpyDeviceToHost, s) ); OK_CHECK_CUBLAS( err, cublasSetStream( *(cublasHandle_t *) linalg_handle, s) ); OK_CHECK_CUBLAS( err, CUBLAS(axpy)(*(cublasHandle_t *) linalg_handle, dim_in2, &alpha, M_in->data + row_ * ptr_stride_in, stride_in, M_out->data + i * ptr_stride_out, stride_out) ); OK_CHECK_CUDA( err, cudaStreamDestroy(s) ); } else for (i = 0; i < dim_in1; ++i) { size_t row_; cudaStream_t s; OK_CHECK_CUDA( err, cudaStreamCreate(&s) ); OK_CHECK_CUDA( err, cudaMemcpyAsync(&row_, u->indices + i * u->stride, sizeof(row_), cudaMemcpyDeviceToHost, s) ); OK_CHECK_CUBLAS( err, cublasSetStream( *(cublasHandle_t *) linalg_handle, s) ); OK_CHECK_CUBLAS( err, CUBLAS(axpy)(*(cublasHandle_t *) linalg_handle, dim_in2, &alpha, M_in->data + i * ptr_stride_in, stride_in, M_out->data + row_ * ptr_stride_out, stride_out) ); OK_CHECK_CUDA( err, cudaStreamDestroy(s) ); } cudaDeviceSynchronize(); err = OK_STATUS_CUDA; /* restore CUDA BLAS context to the default stream */ OK_MAX_ERR( err, OK_SCAN_CUBLAS( cublasSetStream( *(cublasHandle_t *) linalg_handle, NULL) )); return err ? 
err : OK_STATUS_CUDA; } static __global__ void __upsampling_count(size_t * indices, ok_float * counts, size_t stride_idx, size_t stride_cts, size_t size) { size_t i; for (i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) counts[indices[i * stride_idx] * stride_cts] += kOne; } ok_status upsamplingvec_count(const upsamplingvec * u, vector * counts) { if ((!u || !counts) || (!u->indices || !counts->data)) return OK_SCAN_ERR( OPTKIT_ERROR_UNALLOCATED ); uint grid_dim = calc_grid_dim(u->size1); if (u->size2 > counts->size) return OK_SCAN_ERR( OPTKIT_ERROR_DIMENSION_MISMATCH ); OK_RETURNIF_ERR( vector_scale(counts, kZero) ); __upsampling_count<<<grid_dim, kBlockSize>>>(u->indices, counts->data, u->stride, counts->stride, u->size1); cudaDeviceSynchronize(); return OK_STATUS_CUDA; } #ifdef __cplusplus } #endif
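In upsamplingvec_count above, __upsampling_count increments counts[indices[i]] from many threads, so two threads that read the same target index would race on the read-modify-write. The histogram-style alternative below uses atomicAdd; it is a sketch, not the library's implementation, and it assumes ok_float is float (atomicAdd on double requires newer hardware than the float overload does).

// Sketch only: atomic variant of the counting kernel above, assuming ok_float == float.
static __global__ void upsampling_count_atomic(const size_t *indices, float *counts,
        size_t stride_idx, size_t stride_cts, size_t size)
{
    for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
            i += gridDim.x * blockDim.x)
        atomicAdd(&counts[indices[i * stride_idx] * stride_cts], 1.0f);
}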
369e21d605d7537371666d5b33740cdbc00ca5ff.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <hip/hip_runtime_api.h> //#include <cutil.h> #include <hip/hip_runtime.h> #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 4096 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { int block_id; int warp_id; int i; int index; int tid = blockDim.x * blockIdx.x + threadIdx.x; void **ptr_array = (void **)my_ptr_array; unsigned long long *array = (unsigned long long *)my_array; if (tid == 0) { // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; //int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; // for (block_id = 0; block_id < num_blocks_k; block_id++) { for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { for (i = 0; i < elements_per_warp; i++) { //index = (block_id * elements_per_block) + (warp_id * elements_per_warp); index = (warp_id * elements_per_warp); ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))]; } } /* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; } */ for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; array[i] = (unsigned long long)ptr_array[i]; } } __syncthreads(); } __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { // unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int block_id = blockIdx.x; int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x % 32; // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; // int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; //int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; int index1 = (warp_id * elements_per_warp) + warp_thread_id; void **ptr_array = (void **)my_ptr_array; unsigned long long int *array = (unsigned long long int *)my_array; void **tmp_ptr; //tmp_ptr = (void *)sdata; //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); tmp_ptr = (void **)(&(ptr_array[index1])); double f1, f2, f3; f1 = 1.1; f2 = 2.5; if (warp_thread_id < divergence) { /* __asm volatile ( ".reg .f32 %r114;\n\t" "mov.f32 %r114, 2.2;\n\t" ); */ for (int l = 0; l < iterations; l++) { f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long 
long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long 
long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned 
long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } void parametric_measure_shared(int N, int iterations, int stride) { hipProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long * duration; unsigned long long * latency; hipError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 1 is %s\n", hipGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice); hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice); hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 2 is %s\n", hipGetErrorString(error_id)); } hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); hipDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipProfilerStart(); hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, 
duration, stride, divergence, num_blocks, num_threads_per_block); hipDeviceSynchronize(); ///hipDeviceSynchronize (); hipProfilerStop(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 3 is %s\n", hipGetErrorString(error_id)); } /* copy results from GPU to CPU */ hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost); hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost); hipDeviceSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time); printf("%f\n", time); /* free memory on GPU */ hipFree(d_a); hipFree(d_ptr_a); hipFree(duration); hipDeviceSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(latency); } int main(int argc, char **argv) { int N; if (argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); int stride = atoi(argv[5]); N = GLOBAL_MEM_ELEMENTS; parametric_measure_shared(N, 10, stride); return 0; }
369e21d605d7537371666d5b33740cdbc00ca5ff.cu
#include <stdio.h> #include <iostream> #include <cuda_profiler_api.h> //#include <cutil.h> #include <cuda_runtime.h> #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 4096 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { int block_id; int warp_id; int i; int index; int tid = blockDim.x * blockIdx.x + threadIdx.x; void **ptr_array = (void **)my_ptr_array; unsigned long long *array = (unsigned long long *)my_array; if (tid == 0) { // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; //int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; // for (block_id = 0; block_id < num_blocks_k; block_id++) { for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { for (i = 0; i < elements_per_warp; i++) { //index = (block_id * elements_per_block) + (warp_id * elements_per_warp); index = (warp_id * elements_per_warp); ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))]; } } /* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; } */ for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; array[i] = (unsigned long long)ptr_array[i]; } } __syncthreads(); } __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { // unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int block_id = blockIdx.x; int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x % 32; // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; // int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; //int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; int index1 = (warp_id * elements_per_warp) + warp_thread_id; void **ptr_array = (void **)my_ptr_array; unsigned long long int *array = (unsigned long long int *)my_array; void **tmp_ptr; //tmp_ptr = (void *)sdata; //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); tmp_ptr = (void **)(&(ptr_array[index1])); double f1, f2, f3; f1 = 1.1; f2 = 2.5; if (warp_thread_id < divergence) { /* __asm volatile ( ".reg .f32 %r114;\n\t" "mov.f32 %r114, 2.2;\n\t" ); */ for (int l = 0; l < iterations; l++) { f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned 
long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long 
long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long 
long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } void parametric_measure_shared(int N, int iterations, int stride) { cudaProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long * duration; unsigned long long * latency; cudaError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 1 is %s\n", cudaGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 2 is %s\n", cudaGetErrorString(error_id)); } init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); cudaDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cudaProfilerStart(); cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block); cudaDeviceSynchronize(); ///cudaThreadSynchronize (); 
cudaProfilerStop(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 3 is %s\n", cudaGetErrorString(error_id)); } /* copy results from GPU to CPU */ cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost); cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost); cudaThreadSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time); printf("%f\n", time); /* free memory on GPU */ cudaFree(d_a); cudaFree(d_ptr_a); cudaFree(duration); cudaThreadSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(latency); } int main(int argc, char **argv) { int N; if (argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); int stride = atoi(argv[5]); N = GLOBAL_MEM_ELEMENTS; parametric_measure_shared(N, 10, stride); return 0; }
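The benchmark above builds a circular chain of pointers on the host (each element stores the address of the element stride + 1 positions ahead), walks it in the shared_latency kernel, and times the walk with CUDA events. The sketch below reduces that idea to its bare minimum so the measurement principle is easier to see: a single thread follows index-based links, so every load depends on the previous one, and the elapsed time divided by the step count approximates load-to-use latency for whichever cache level the working set lands in. The kernel name chaseKernel and the constants are illustrative only and do not come from the file above.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Follow the chain for `steps` dependent loads; write the final index out so
// the chain cannot be optimized away.
__global__ void chaseKernel(const unsigned *next, unsigned steps, unsigned *sink) {
    unsigned p = 0;
    for (unsigned i = 0; i < steps; ++i) {
        p = next[p];                      // each load depends on the previous one
    }
    *sink = p;
}

int main() {
    const unsigned N = 1 << 20, STRIDE = 64, STEPS = 1 << 17;
    unsigned *h = (unsigned *)malloc(N * sizeof(unsigned));
    for (unsigned i = 0; i < N; ++i) h[i] = (i + STRIDE) % N;   // circular chain of indices

    unsigned *d_next, *d_sink;
    cudaMalloc(&d_next, N * sizeof(unsigned));
    cudaMalloc(&d_sink, sizeof(unsigned));
    cudaMemcpy(d_next, h, N * sizeof(unsigned), cudaMemcpyHostToDevice);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    chaseKernel<<<1, 1>>>(d_next, STEPS, d_sink);   // one thread: pure latency, nothing to overlap
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("%.1f ns per dependent load\n", ms * 1e6f / STEPS);

    cudaFree(d_next); cudaFree(d_sink); free(h);
    return 0;
}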
8e7fdaec57619eb197490d9ed3b776734551b2cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Ghepard: GPU trial factoring for Marsenne numbers. Copyright (c) Mihai Preda, 2015 - 2016. /* "Ghepard" is a program for trial factoring of Mersenne numbers on a CUDA GPU. Mersenne numbers are of the form 2**exp - 1; see http://www.mersenne.org/various/math.php This is inpired by mfaktc: http://www.mersenneforum.org/mfaktc/ For a given mersenne number 2**exp-1, where exp is prime, the factors are of the form m = 2*k*exp + 1, and we're interested only in prime factors. Limits: exp < 2**31; 2**64 < m < 2**76. First prime candidate factors are generated -- this is called "sieving" because it uses Erathostene's sieve. Next each candidate m is tested by the computing the modular exponentiation reminder r = 2**exp modulo m. If this reminder is equal to 1, it means that m is a factor of 2^exp-1, and thus the mersenne number is not prime. Naming conventions used: 1. type names: - u8, u16, u32, u64, u128: unsigned integer with the given number of *bits*. - U2, U3, U4, etc: unsigned long integer with the given number of 32-bit words. The words of a long integer are named "a", "b", "c", etc, a being the least-significant. 2. operators on long integers: - usual: +, -, *. - bit shifts: <<, >>. - shr1w(): word shift right - funnel shift returning one word: shl, shr - cast to larger type, e.g. _U4(U3 x) - mulLow(): multiplication computing only the lower words - shr3wMul(): multiplication computing only the higher words - equality == - square */ #include <stdio.h> #include <assert.h> #include <sys/time.h> #include <sys/unistd.h> #define DEVICE __device__ static #define ASIZE(a) (sizeof(a) / sizeof(a[0])) typedef unsigned char u8; typedef unsigned short u16; typedef unsigned u32; typedef unsigned long long u64; typedef __uint128_t u128; // Multi-precision unsigned ints with the given number of words. // The least-significant word is "a". struct U2 { u32 a, b; }; struct U3 { u32 a, b, c; }; struct U4 { u32 a, b, c, d; }; struct U5 { u32 a, b, c, d, e; }; #include "debug.h" #include "widemath.h" // Table of small primes. DEVICE const __restrict__ u32 primes[] = { #include "primes128.inc" }; // Number of pre-computed primes for sieving. #define NPRIMES (ASIZE(primes)) // Unit tests. A series of pairs (exponent, k) where k represents a factor. struct Test { u32 exp; u64 k; }; #include "tests.inc" // Threads per sieving block. #define SIEVE_THREADS (512 + 128 + 32) // Threads per testing block. #define TEST_THREADS 512 #define TEST_BLOCKS 128 // How many words of shared memory to use for sieving. #define NWORDS (8 * 1024) // Bits for sieving (each word is 32 bits). #define NBITS (NWORDS << 5) // Must update acceptClass() when changing these. #define NCLASS (4 * 3 * 5 * 7 * 11) // Out of NCLASS, how many classes pass acceptClass(). Sync with NCLASS. #define NGOODCLASS (2 * 2 * 4 * 6 * 10) #define SIEVE_BLOCKS 48 #define KTAB_SIZE ((int)(NGOODCLASS * NBITS * 0.195f)) // Some powers of 2 as floats, used by inv160() #define TWO16f 65536.0f #define TWO17f 131072.0f #define TWO28f 268435456.0f #define TWO32f 4294967296.0f #define TWO64f 18446744073709551616.0f // Table with inv(exp). Initialized once per exponent. DEVICE u32 invTab[NPRIMES]; // "Bit to clear" table, depends on exponent and k0; initialized once per exponent. DEVICE int btcTabs[NGOODCLASS][NPRIMES]; // Sieved Ks table. sieve() outputs here, test() reads from here. // kTabSize contains the size of kTab, and must be set to 0 before each sieve() invocation. 
DEVICE u32 kTab[KTAB_SIZE]; DEVICE u32 kTabSize; // If a factor m is found, save it here. DEVICE U3 foundFactor = (U3) {0, 0, 0}; // The class id for each good class; set by initClassTab(). DEVICE u16 classTab[NGOODCLASS]; // Pinned pieces of host memory used to copy to/from GPU. U3 *hostFactor; u32 *hostN; // Address of kTabSize u32 *kTabSizeHost; hipStream_t stream; // Helper to check and bail out on any CUDA error. #define CUDA_CHECK {hipError_t _err = hipGetLastError(); if (_err) { printf("CUDA error: %s\n", hipGetErrorString(_err)); return 0; }} u64 timeMillis() { struct timeval tv; gettimeofday(&tv, 0); return tv.tv_sec * 1000 + tv.tv_usec / 1000; } // Returns x % m, given u the "inverse" of m (2**160 / m); m at most 77 bits. DEVICE U3 mod(U5 x, U3 m, U3 u) { return (U3){x.a, x.b, x.c} - mulLow(m, mulHi((U3) {x.c, x.d, x.e}, u)); } // float lower approximation of 2**32 / x DEVICE float floatInv(U3 x) { return __frcp_rd(__ull2float_ru(_u64(shr1w(x)) + 1)); } // float lower approximation of a + b * 2**32; (__fmaf_rz(b, TWO32f, a)) DEVICE float floatOf(u32 a, u32 b) { return __ull2float_rz(_u64((U2) {a, b})); } // float lower approximation of (a + b * 2**32) * nf DEVICE float floatOf(u32 a, u32 b, float nf) { return __fmul_rz(floatOf(a, b), nf); } // Returns 2**160 / n DEVICE U3 inv160(U3 n, float nf) { // 1 assert(nf * TWO64f < TWO32f); u32 rc = (u32) __fmul_rz(TWO64f, nf); U4 q = shl1w(~mulLow(n, rc) + 1); // 2 float qf = floatOf(q.c, q.d, nf) * TWO16f; assert(qf < TWO28f); u32 qi = (u32) qf; u32 rb = (qi << 16); rc += (qi >> 16); q = q - ((n * qi) << 16); assert(q.d == 0); // 3 qf = floatOf(q.b, q.c, nf); assert(qf < (1 << 24)); qi = (u32) qf; U2 rup = (U2){rb, rc} + qi; q = q - n * qi; assert(q.d == 0); // 4 qf = floatOf(q.b, q.c, nf) * TWO17f; assert(qf < (1 << 22)); qi = (u32) qf; rup = rup + (qi >> 17); U3 ret = (U3) {(qi << 15), rup.a, rup.b}; q = ((U4) {0, q.a, q.b, q.c}) - ((n * qi) << 15); assert(q.d == 0); // 5 qf = floatOf(q.b, q.c, nf); assert(qf < (1 << 20)); return ret + (u32) qf; #endif } DEVICE u32 modInv32(u64 step, u32 prime) { int n = step % prime; int q = prime / n; int d = prime - q * n; int x = -q; int prevX = 1; while (d) { q = n / d; { int save = d; d = n - q * d; n = save; } // n = set(d, n - q * d); { int save = x; x = prevX - q * x; prevX = save; } // prevX = set(x, prevX - q * x); } return (prevX >= 0) ? prevX : (prevX + prime); } // 3 times 64bit modulo, expensive! DEVICE int bitToClear(u32 exp, u64 k, u32 prime, u32 inv) { u32 kmod = k % prime; u32 qmod = (kmod * (u64) (exp << 1) + 1) % prime; return (prime - qmod) * (u64) inv % prime; } __global__ void initInvTab(u32 exp) { assert(gridDim.x * blockDim.x == NPRIMES); u32 id = blockIdx.x * blockDim.x + threadIdx.x; invTab[id] = modInv32(2 * NCLASS * (u64) exp, primes[id]); } __global__ void initBtcTabs(u32 exp, u64 kBase) { assert(gridDim.x == NGOODCLASS); int *btcTab = btcTabs[blockIdx.x]; u64 k = kBase + classTab[blockIdx.x]; // if (!threadIdx.x) { printf("start class %d (%d)\n", classTab[blockIdx.x], blockIdx.x); } for (int id = threadIdx.x; id < NPRIMES; id += blockDim.x) { btcTab[id] = bitToClear(exp, k, primes[id], invTab[id]); } // if (!threadIdx.x) { printf("ended class %d (%d)\n", classTab[blockIdx.x], blockIdx.x); } } // Returns whether 2 * c * exp + 1 is 1 or 7 modulo 8. // Any Marsenne factor must be of this form. 
See http://www.mersenne.org/various/math.php DEVICE bool q1or7mod8(u32 exp, u32 c) { return !(c & 3) || ((c & 3) + (exp & 3) == 4); } // whether 2 * c * exp + 1 != 0 modulo prime DEVICE bool multiple(u32 exp, u32 c, unsigned prime) { return (2 * c * (u64) exp) % prime == (prime - 1); } // Among all the NCLASS classes, select the ones that are "good", // i.e. not corresponding to a multiple of a small prime. __global__ void initClasses(u32 exp) { __shared__ u32 pos; pos = 0; __syncthreads(); for (int c = threadIdx.x; c < NCLASS; c += blockDim.x) { if (q1or7mod8(exp, c) && !multiple(exp, c, 3) && !multiple(exp, c, 5) && !multiple(exp, c, 7) && !multiple(exp, c, 11)) { classTab[atomicAdd(&pos, 1)] = c; } } #ifndef NDEBUG __syncthreads(); assert(pos == NGOODCLASS); #endif } // Returns (2**exp % m) == 1 DEVICE bool expMod(u32 exp, U3 m, U3 b) { assert(m.c && !(m.c & 0xffffc000)); float nf = floatInv(m); U3 u = inv160(m, nf); U3 a = mod((U5) {0, 0, b.a, b.b, b.c}, m, u); do { a = mod(square(a), m, u); if (exp & 0x80000000) { a = a + a; } } while (exp += exp); a = a - mulLow(m, (u32) floatOf(a.b, a.c, nf)); if (a.c >= m.c && a.a == (m.a + 1)) { a = a - m; } return !(a.b | a.c | (a.a - 1)); // a.a == 1 && !a.b && !a.c; } __global__ void test(u32 doubleExp, u32 flushedExp, U3 m0, U3 b) { for (u32 i = blockIdx.x * blockDim.x + threadIdx.x, end = kTabSize; i < end; i += blockDim.x * gridDim.x) { U3 m = m0 + _U2(kTab[i] * (u64) doubleExp); if (expMod(flushedExp, m, b)) { foundFactor = m; } } } __global__ void trysub(U3 a, U3 b) { // a.a |= threadIdx.x; foundFactor = a - b; print("x", foundFactor); } __global__ void tryadd(U3 a, U3 b) { foundFactor = a + b; } // Sieve bits using shared memory. // For each prime from the primes[] table, starting at a position corresponding to a // multiple of prime ("btc"), periodically set the bit to indicate a non-prime. __global__ void sieve() { __shared__ u32 words[NWORDS]; // Set shared memory to zero. for (int i = threadIdx.x; i < NWORDS; i += blockDim.x) { words[i] = 0; } for (int loop = blockIdx.x; loop < NGOODCLASS; loop += gridDim.x) { __syncthreads(); // Sieve bits. 
int *btcTab = btcTabs[loop]; for (int i = threadIdx.x; i < (NPRIMES - 32); i += blockDim.x) { int btc = btcTab[i]; int prime = primes[i]; while (btc < NBITS) { atomicOr(words + (btc >> 5), 1 << (btc & 0x1f)); btc += prime; } btcTab[i] = btc - NBITS; } __syncthreads(); int popc = 0; for (int i = threadIdx.x; i < NWORDS; i += blockDim.x) { popc += __popc(~words[i]); } u32 *out = kTab + atomicAdd(&kTabSize, popc); u32 c = classTab[loop]; for (int i = threadIdx.x; i < NWORDS; i += blockDim.x) { u32 bits = ~words[i]; words[i] = 0; while (bits) { int bit = __clz(__brev(bits)); // Equivalent to: __ffs(bits) - 1; bits &= bits - 1; // Equivalent to: bits &= ~(1 << bit); but likely faster *out++ = c + ((i << 5) + bit) * NCLASS; // dummy += c + ((i << 5) + bit) * NCLASS; ++out; } } } } // int bit = bfind(bits); // bits &= ~(1 << bit); // The smallest k that produces a factor m = (2*k*exp + 1) such that m >= 2**bits u64 calculateK(u32 exp, int bits) { return ((((u128) 1) << (bits - 1)) + (exp - 2)) / exp; } void time(const char *s = 0) { static u64 prev = 0; u64 now = timeMillis(); if (prev && s) { printf("%s: %llu ms\n", s, now - prev); } prev = now; } void initExponent(u32 exp) { hipLaunchKernelGGL(( initClasses), dim3(1), dim3(1024), 0, 0, exp); hipLaunchKernelGGL(( initInvTab), dim3(NPRIMES/TEST_THREADS), dim3(TEST_THREADS), 0, 0, exp); // time("init Exp"); } u128 _u128(U3 x) { return x.a | (((u64) x.b) << 32) | (((u128) x.c) << 64); } U3 _U3(u128 x) { return (U3) {(u32) x, (u32)(((u64)x) >> 32), (u32)(x >> 64)}; } u32 oneShl(unsigned sh) { return (sh < 32) ? (1 << sh) : 0; } u128 factor(u32 exp, u64 k0, u32 repeat) { hipLaunchKernelGGL(( initBtcTabs), dim3(NGOODCLASS), dim3(TEST_THREADS), 0, 0, exp, k0); // hipDeviceSynchronize(); CUDA_CHECK; time("init K"); u32 doubleExp = exp + exp; u32 flushedExp = exp << __builtin_clz(exp); assert(flushedExp & 0x80000000); unsigned sh; if ((flushedExp >> 25) < 80) { sh = flushedExp >> 24; flushedExp <<= 8; } else { sh = flushedExp >> 25; flushedExp <<= 7; } assert(sh >= 80 && sh < 160); U3 b = (U3) {oneShl(sh - 64), oneShl(sh - 96), oneShl(sh - 128)}; *hostFactor = (U3) {0, 0, 0}; *hostN = 0; hipMemcpyToSymbolAsync(foundFactor, hostFactor, sizeof(U3), 0, hipMemcpyHostToDevice, stream); int minLeft = 1000000; repeat = 4; for (int i = 0; i < repeat; ++i, k0 += NBITS * NCLASS) { if (i == 0) { hipMemcpyAsync(kTabSizeHost, hostFactor, sizeof(u32), hipMemcpyHostToDevice, stream); hipLaunchKernelGGL(( sieve), dim3(SIEVE_BLOCKS), dim3(SIEVE_THREADS), 0, stream, ); } hipMemcpyAsync(hostN, kTabSizeHost, sizeof(u32), hipMemcpyDeviceToHost, stream); U3 m = _U3(doubleExp * (u128) k0) | 1; assert(m.c); hipLaunchKernelGGL(( test), dim3(TEST_BLOCKS), dim3(TEST_THREADS), 0, stream, doubleExp, flushedExp, m, b); hipMemcpyFromSymbolAsync(hostFactor, foundFactor, sizeof(U3), 0, hipMemcpyDeviceToHost, stream); hipStreamSynchronize(stream); CUDA_CHECK; int spaceLeft = ASIZE(kTab) - *hostN; if (spaceLeft < minLeft) { minLeft = spaceLeft; } printf("%u %d ", *hostN, spaceLeft); time("time"); if (hostFactor->a != 0) { if (repeat > 1) { printf("Did %d cycles out of %d\n", i + 1, repeat); } return _u128(*hostFactor); } } printf("min space left %d\n", minLeft); return 0; } u128 factor(u32 exp, u32 startPow2) { // printf("%d\n", startPow2); u64 k0 = calculateK(exp, startPow2); k0 -= k0 % NCLASS; u64 kEnd = calculateK(exp, startPow2 + 1); kEnd += (NCLASS - (kEnd % NCLASS)) % NCLASS; u32 repeat = (kEnd - k0 + (NBITS * NCLASS - 1)) / (NBITS * NCLASS); // printf("kend %llu\n", kEnd); return 
factor(exp, k0, repeat); } bool verifyFactor(u32 exp, u64 k) { u64 k0 = k - (k % NCLASS); u128 m = factor(exp, k0, 1); if (m != 2 * exp * (u128) k + 1) { printf("\nFAIL: %u %llu\n", exp, k); return false; } return true; } bool run(int argc, char **argv) { if (argc == 1) { printf("Quick selftest..\n"); for (Test *t = tests, *end = tests + ASIZE(tests); t < end; ++t) { u32 exp = t->exp; u64 k = t->k; printf("\r%4d: %9u %15llu ", (int) (t - tests), exp, k); initExponent(exp); if (!verifyFactor(exp, k)) { printf("\nFAIL: %u %llu\n", exp, k); return false; } } printf("\nExtended selftest..\n"); for (Test *t = tests, *end = tests + ASIZE(tests); t < end; ++t) { u32 exp = t->exp; u64 k = t->k; printf("\r%4d: %9u %15llu\n", (int) (t - tests), exp, k); initExponent(exp); u128 m = 2 * exp * (u128) k + 1; u32 mup = (u32)(m >> 64); assert(mup); u32 pow2 = 95 - __builtin_clz(mup); u128 m2 = factor(exp, pow2); if (m2 != m) { printf("\nFAIL: %u %llu\n", exp, k); return false; } } } else { u32 exp = (u32) atol(argv[1]); int startPow2 = (argc >= 3) ? atoi(argv[2]) : 65; initExponent(exp); u128 m = factor(exp, startPow2); if (m != 0) { printf("m: 0x%016llx%016llx\n", (u64) (m >> 64), (u64) m); } } return true; } int main(int argc, char **argv) { time(); assert(argc > 0); assert(NPRIMES % 1024 == 0); // hipSetDevice(1); hipSetDeviceFlags(hipDeviceScheduleBlockingSync); CUDA_CHECK; hipStreamCreate(&stream); hipHostMalloc((void **) &hostFactor, sizeof(U3), hipHostMallocDefault); hipHostMalloc((void **) &hostN, sizeof(u32), hipHostMallocDefault); hipGetSymbolAddress((void **)&kTabSizeHost, kTabSize); hipDeviceSynchronize(); CUDA_CHECK; time("host alloc"); U3 a{0, 0, 1}; U3 b{1, 0, 0}; hipLaunchKernelGGL(( trysub), dim3(1), dim3(1), 0, 0, a, b); hipDeviceSynchronize(); hipMemcpyFromSymbol(hostFactor, foundFactor, sizeof(U3), 0); // print("x", *hostFactor); run(argc, argv); // Clean-up before exit hipDeviceSynchronize(); hipHostFree(hostFactor); hipHostFree(hostN); hipStreamDestroy(stream); hipDeviceReset(); }
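A quick host-side check of the class arithmetic spelled out in the header comment and implemented by initClasses() above: out of NCLASS = 4*3*5*7*11 = 4620 residue classes for k, only those c for which m = 2*c*exp + 1 is 1 or 7 modulo 8 and not divisible by 3, 5, 7 or 11 are kept, which is NGOODCLASS = 2*2*4*6*10 = 960 classes. The loop below restates q1or7mod8() and multiple() on the CPU for an arbitrary sample exponent (any odd prime other than 3, 5, 7, 11 gives the same count); it is a verification sketch, not part of the program.

#include <cassert>
#include <cstdio>

int main() {
    const unsigned NCLASS = 4 * 3 * 5 * 7 * 11;          // 4620
    const unsigned exp = 23;                             // sample Mersenne exponent
    const unsigned smallPrimes[4] = {3, 5, 7, 11};
    unsigned good = 0;
    for (unsigned c = 0; c < NCLASS; ++c) {
        // m = 2*c*exp + 1 is 1 or 7 mod 8 (same test as q1or7mod8() above)
        bool ok8 = !(c & 3) || ((c & 3) + (exp & 3) == 4);
        // m is not a multiple of 3, 5, 7 or 11 (same test as multiple() above)
        bool okSmall = true;
        for (unsigned p : smallPrimes) {
            if ((2ull * c * exp) % p == p - 1) { okSmall = false; break; }
        }
        if (ok8 && okSmall) ++good;
    }
    printf("good classes: %u of %u\n", good, NCLASS);    // prints: good classes: 960 of 4620
    assert(good == 2 * 2 * 4 * 6 * 10);
    return 0;
}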
8e7fdaec57619eb197490d9ed3b776734551b2cc.cu
// Ghepard: GPU trial factoring for Marsenne numbers. Copyright (c) Mihai Preda, 2015 - 2016. /* "Ghepard" is a program for trial factoring of Mersenne numbers on a CUDA GPU. Mersenne numbers are of the form 2**exp - 1; see http://www.mersenne.org/various/math.php This is inpired by mfaktc: http://www.mersenneforum.org/mfaktc/ For a given mersenne number 2**exp-1, where exp is prime, the factors are of the form m = 2*k*exp + 1, and we're interested only in prime factors. Limits: exp < 2**31; 2**64 < m < 2**76. First prime candidate factors are generated -- this is called "sieving" because it uses Erathostene's sieve. Next each candidate m is tested by the computing the modular exponentiation reminder r = 2**exp modulo m. If this reminder is equal to 1, it means that m is a factor of 2^exp-1, and thus the mersenne number is not prime. Naming conventions used: 1. type names: - u8, u16, u32, u64, u128: unsigned integer with the given number of *bits*. - U2, U3, U4, etc: unsigned long integer with the given number of 32-bit words. The words of a long integer are named "a", "b", "c", etc, a being the least-significant. 2. operators on long integers: - usual: +, -, *. - bit shifts: <<, >>. - shr1w(): word shift right - funnel shift returning one word: shl, shr - cast to larger type, e.g. _U4(U3 x) - mulLow(): multiplication computing only the lower words - shr3wMul(): multiplication computing only the higher words - equality == - square */ #include <stdio.h> #include <assert.h> #include <sys/time.h> #include <sys/unistd.h> #define DEVICE __device__ static #define ASIZE(a) (sizeof(a) / sizeof(a[0])) typedef unsigned char u8; typedef unsigned short u16; typedef unsigned u32; typedef unsigned long long u64; typedef __uint128_t u128; // Multi-precision unsigned ints with the given number of words. // The least-significant word is "a". struct U2 { u32 a, b; }; struct U3 { u32 a, b, c; }; struct U4 { u32 a, b, c, d; }; struct U5 { u32 a, b, c, d, e; }; #include "debug.h" #include "widemath.h" // Table of small primes. DEVICE const __restrict__ u32 primes[] = { #include "primes128.inc" }; // Number of pre-computed primes for sieving. #define NPRIMES (ASIZE(primes)) // Unit tests. A series of pairs (exponent, k) where k represents a factor. struct Test { u32 exp; u64 k; }; #include "tests.inc" // Threads per sieving block. #define SIEVE_THREADS (512 + 128 + 32) // Threads per testing block. #define TEST_THREADS 512 #define TEST_BLOCKS 128 // How many words of shared memory to use for sieving. #define NWORDS (8 * 1024) // Bits for sieving (each word is 32 bits). #define NBITS (NWORDS << 5) // Must update acceptClass() when changing these. #define NCLASS (4 * 3 * 5 * 7 * 11) // Out of NCLASS, how many classes pass acceptClass(). Sync with NCLASS. #define NGOODCLASS (2 * 2 * 4 * 6 * 10) #define SIEVE_BLOCKS 48 #define KTAB_SIZE ((int)(NGOODCLASS * NBITS * 0.195f)) // Some powers of 2 as floats, used by inv160() #define TWO16f 65536.0f #define TWO17f 131072.0f #define TWO28f 268435456.0f #define TWO32f 4294967296.0f #define TWO64f 18446744073709551616.0f // Table with inv(exp). Initialized once per exponent. DEVICE u32 invTab[NPRIMES]; // "Bit to clear" table, depends on exponent and k0; initialized once per exponent. DEVICE int btcTabs[NGOODCLASS][NPRIMES]; // Sieved Ks table. sieve() outputs here, test() reads from here. // kTabSize contains the size of kTab, and must be set to 0 before each sieve() invocation. DEVICE u32 kTab[KTAB_SIZE]; DEVICE u32 kTabSize; // If a factor m is found, save it here. 
DEVICE U3 foundFactor = (U3) {0, 0, 0}; // The class id for each good class; set by initClassTab(). DEVICE u16 classTab[NGOODCLASS]; // Pinned pieces of host memory used to copy to/from GPU. U3 *hostFactor; u32 *hostN; // Address of kTabSize u32 *kTabSizeHost; cudaStream_t stream; // Helper to check and bail out on any CUDA error. #define CUDA_CHECK {cudaError_t _err = cudaGetLastError(); if (_err) { printf("CUDA error: %s\n", cudaGetErrorString(_err)); return 0; }} u64 timeMillis() { struct timeval tv; gettimeofday(&tv, 0); return tv.tv_sec * 1000 + tv.tv_usec / 1000; } // Returns x % m, given u the "inverse" of m (2**160 / m); m at most 77 bits. DEVICE U3 mod(U5 x, U3 m, U3 u) { return (U3){x.a, x.b, x.c} - mulLow(m, mulHi((U3) {x.c, x.d, x.e}, u)); } // float lower approximation of 2**32 / x DEVICE float floatInv(U3 x) { return __frcp_rd(__ull2float_ru(_u64(shr1w(x)) + 1)); } // float lower approximation of a + b * 2**32; (__fmaf_rz(b, TWO32f, a)) DEVICE float floatOf(u32 a, u32 b) { return __ull2float_rz(_u64((U2) {a, b})); } // float lower approximation of (a + b * 2**32) * nf DEVICE float floatOf(u32 a, u32 b, float nf) { return __fmul_rz(floatOf(a, b), nf); } // Returns 2**160 / n DEVICE U3 inv160(U3 n, float nf) { // 1 assert(nf * TWO64f < TWO32f); u32 rc = (u32) __fmul_rz(TWO64f, nf); U4 q = shl1w(~mulLow(n, rc) + 1); // 2 float qf = floatOf(q.c, q.d, nf) * TWO16f; assert(qf < TWO28f); u32 qi = (u32) qf; u32 rb = (qi << 16); rc += (qi >> 16); q = q - ((n * qi) << 16); assert(q.d == 0); // 3 qf = floatOf(q.b, q.c, nf); assert(qf < (1 << 24)); qi = (u32) qf; U2 rup = (U2){rb, rc} + qi; q = q - n * qi; assert(q.d == 0); // 4 qf = floatOf(q.b, q.c, nf) * TWO17f; assert(qf < (1 << 22)); qi = (u32) qf; rup = rup + (qi >> 17); U3 ret = (U3) {(qi << 15), rup.a, rup.b}; q = ((U4) {0, q.a, q.b, q.c}) - ((n * qi) << 15); assert(q.d == 0); // 5 qf = floatOf(q.b, q.c, nf); assert(qf < (1 << 20)); return ret + (u32) qf; #endif } DEVICE u32 modInv32(u64 step, u32 prime) { int n = step % prime; int q = prime / n; int d = prime - q * n; int x = -q; int prevX = 1; while (d) { q = n / d; { int save = d; d = n - q * d; n = save; } // n = set(d, n - q * d); { int save = x; x = prevX - q * x; prevX = save; } // prevX = set(x, prevX - q * x); } return (prevX >= 0) ? prevX : (prevX + prime); } // 3 times 64bit modulo, expensive! DEVICE int bitToClear(u32 exp, u64 k, u32 prime, u32 inv) { u32 kmod = k % prime; u32 qmod = (kmod * (u64) (exp << 1) + 1) % prime; return (prime - qmod) * (u64) inv % prime; } __global__ void initInvTab(u32 exp) { assert(gridDim.x * blockDim.x == NPRIMES); u32 id = blockIdx.x * blockDim.x + threadIdx.x; invTab[id] = modInv32(2 * NCLASS * (u64) exp, primes[id]); } __global__ void initBtcTabs(u32 exp, u64 kBase) { assert(gridDim.x == NGOODCLASS); int *btcTab = btcTabs[blockIdx.x]; u64 k = kBase + classTab[blockIdx.x]; // if (!threadIdx.x) { printf("start class %d (%d)\n", classTab[blockIdx.x], blockIdx.x); } for (int id = threadIdx.x; id < NPRIMES; id += blockDim.x) { btcTab[id] = bitToClear(exp, k, primes[id], invTab[id]); } // if (!threadIdx.x) { printf("ended class %d (%d)\n", classTab[blockIdx.x], blockIdx.x); } } // Returns whether 2 * c * exp + 1 is 1 or 7 modulo 8. // Any Marsenne factor must be of this form. 
See http://www.mersenne.org/various/math.php DEVICE bool q1or7mod8(u32 exp, u32 c) { return !(c & 3) || ((c & 3) + (exp & 3) == 4); } // whether 2 * c * exp + 1 != 0 modulo prime DEVICE bool multiple(u32 exp, u32 c, unsigned prime) { return (2 * c * (u64) exp) % prime == (prime - 1); } // Among all the NCLASS classes, select the ones that are "good", // i.e. not corresponding to a multiple of a small prime. __global__ void initClasses(u32 exp) { __shared__ u32 pos; pos = 0; __syncthreads(); for (int c = threadIdx.x; c < NCLASS; c += blockDim.x) { if (q1or7mod8(exp, c) && !multiple(exp, c, 3) && !multiple(exp, c, 5) && !multiple(exp, c, 7) && !multiple(exp, c, 11)) { classTab[atomicAdd(&pos, 1)] = c; } } #ifndef NDEBUG __syncthreads(); assert(pos == NGOODCLASS); #endif } // Returns (2**exp % m) == 1 DEVICE bool expMod(u32 exp, U3 m, U3 b) { assert(m.c && !(m.c & 0xffffc000)); float nf = floatInv(m); U3 u = inv160(m, nf); U3 a = mod((U5) {0, 0, b.a, b.b, b.c}, m, u); do { a = mod(square(a), m, u); if (exp & 0x80000000) { a = a + a; } } while (exp += exp); a = a - mulLow(m, (u32) floatOf(a.b, a.c, nf)); if (a.c >= m.c && a.a == (m.a + 1)) { a = a - m; } return !(a.b | a.c | (a.a - 1)); // a.a == 1 && !a.b && !a.c; } __global__ void test(u32 doubleExp, u32 flushedExp, U3 m0, U3 b) { for (u32 i = blockIdx.x * blockDim.x + threadIdx.x, end = kTabSize; i < end; i += blockDim.x * gridDim.x) { U3 m = m0 + _U2(kTab[i] * (u64) doubleExp); if (expMod(flushedExp, m, b)) { foundFactor = m; } } } __global__ void trysub(U3 a, U3 b) { // a.a |= threadIdx.x; foundFactor = a - b; print("x", foundFactor); } __global__ void tryadd(U3 a, U3 b) { foundFactor = a + b; } // Sieve bits using shared memory. // For each prime from the primes[] table, starting at a position corresponding to a // multiple of prime ("btc"), periodically set the bit to indicate a non-prime. __global__ void sieve() { __shared__ u32 words[NWORDS]; // Set shared memory to zero. for (int i = threadIdx.x; i < NWORDS; i += blockDim.x) { words[i] = 0; } for (int loop = blockIdx.x; loop < NGOODCLASS; loop += gridDim.x) { __syncthreads(); // Sieve bits. 
int *btcTab = btcTabs[loop]; for (int i = threadIdx.x; i < (NPRIMES - 32); i += blockDim.x) { int btc = btcTab[i]; int prime = primes[i]; while (btc < NBITS) { atomicOr(words + (btc >> 5), 1 << (btc & 0x1f)); btc += prime; } btcTab[i] = btc - NBITS; } __syncthreads(); int popc = 0; for (int i = threadIdx.x; i < NWORDS; i += blockDim.x) { popc += __popc(~words[i]); } u32 *out = kTab + atomicAdd(&kTabSize, popc); u32 c = classTab[loop]; for (int i = threadIdx.x; i < NWORDS; i += blockDim.x) { u32 bits = ~words[i]; words[i] = 0; while (bits) { int bit = __clz(__brev(bits)); // Equivalent to: __ffs(bits) - 1; bits &= bits - 1; // Equivalent to: bits &= ~(1 << bit); but likely faster *out++ = c + ((i << 5) + bit) * NCLASS; // dummy += c + ((i << 5) + bit) * NCLASS; ++out; } } } } // int bit = bfind(bits); // bits &= ~(1 << bit); // The smallest k that produces a factor m = (2*k*exp + 1) such that m >= 2**bits u64 calculateK(u32 exp, int bits) { return ((((u128) 1) << (bits - 1)) + (exp - 2)) / exp; } void time(const char *s = 0) { static u64 prev = 0; u64 now = timeMillis(); if (prev && s) { printf("%s: %llu ms\n", s, now - prev); } prev = now; } void initExponent(u32 exp) { initClasses<<<1, 1024>>>(exp); initInvTab<<<NPRIMES/TEST_THREADS, TEST_THREADS>>>(exp); // time("init Exp"); } u128 _u128(U3 x) { return x.a | (((u64) x.b) << 32) | (((u128) x.c) << 64); } U3 _U3(u128 x) { return (U3) {(u32) x, (u32)(((u64)x) >> 32), (u32)(x >> 64)}; } u32 oneShl(unsigned sh) { return (sh < 32) ? (1 << sh) : 0; } u128 factor(u32 exp, u64 k0, u32 repeat) { initBtcTabs<<<NGOODCLASS, TEST_THREADS>>>(exp, k0); // cudaDeviceSynchronize(); CUDA_CHECK; time("init K"); u32 doubleExp = exp + exp; u32 flushedExp = exp << __builtin_clz(exp); assert(flushedExp & 0x80000000); unsigned sh; if ((flushedExp >> 25) < 80) { sh = flushedExp >> 24; flushedExp <<= 8; } else { sh = flushedExp >> 25; flushedExp <<= 7; } assert(sh >= 80 && sh < 160); U3 b = (U3) {oneShl(sh - 64), oneShl(sh - 96), oneShl(sh - 128)}; *hostFactor = (U3) {0, 0, 0}; *hostN = 0; cudaMemcpyToSymbolAsync(foundFactor, hostFactor, sizeof(U3), 0, cudaMemcpyHostToDevice, stream); int minLeft = 1000000; repeat = 4; for (int i = 0; i < repeat; ++i, k0 += NBITS * NCLASS) { if (i == 0) { cudaMemcpyAsync(kTabSizeHost, hostFactor, sizeof(u32), cudaMemcpyHostToDevice, stream); sieve<<<SIEVE_BLOCKS, SIEVE_THREADS, 0, stream>>>(); } cudaMemcpyAsync(hostN, kTabSizeHost, sizeof(u32), cudaMemcpyDeviceToHost, stream); U3 m = _U3(doubleExp * (u128) k0) | 1; assert(m.c); test<<<TEST_BLOCKS, TEST_THREADS, 0, stream>>>(doubleExp, flushedExp, m, b); cudaMemcpyFromSymbolAsync(hostFactor, foundFactor, sizeof(U3), 0, cudaMemcpyDeviceToHost, stream); cudaStreamSynchronize(stream); CUDA_CHECK; int spaceLeft = ASIZE(kTab) - *hostN; if (spaceLeft < minLeft) { minLeft = spaceLeft; } printf("%u %d ", *hostN, spaceLeft); time("time"); if (hostFactor->a != 0) { if (repeat > 1) { printf("Did %d cycles out of %d\n", i + 1, repeat); } return _u128(*hostFactor); } } printf("min space left %d\n", minLeft); return 0; } u128 factor(u32 exp, u32 startPow2) { // printf("%d\n", startPow2); u64 k0 = calculateK(exp, startPow2); k0 -= k0 % NCLASS; u64 kEnd = calculateK(exp, startPow2 + 1); kEnd += (NCLASS - (kEnd % NCLASS)) % NCLASS; u32 repeat = (kEnd - k0 + (NBITS * NCLASS - 1)) / (NBITS * NCLASS); // printf("kend %llu\n", kEnd); return factor(exp, k0, repeat); } bool verifyFactor(u32 exp, u64 k) { u64 k0 = k - (k % NCLASS); u128 m = factor(exp, k0, 1); if (m != 2 * exp * (u128) k + 1) { 
printf("\nFAIL: %u %llu\n", exp, k); return false; } return true; } bool run(int argc, char **argv) { if (argc == 1) { printf("Quick selftest..\n"); for (Test *t = tests, *end = tests + ASIZE(tests); t < end; ++t) { u32 exp = t->exp; u64 k = t->k; printf("\r%4d: %9u %15llu ", (int) (t - tests), exp, k); initExponent(exp); if (!verifyFactor(exp, k)) { printf("\nFAIL: %u %llu\n", exp, k); return false; } } printf("\nExtended selftest..\n"); for (Test *t = tests, *end = tests + ASIZE(tests); t < end; ++t) { u32 exp = t->exp; u64 k = t->k; printf("\r%4d: %9u %15llu\n", (int) (t - tests), exp, k); initExponent(exp); u128 m = 2 * exp * (u128) k + 1; u32 mup = (u32)(m >> 64); assert(mup); u32 pow2 = 95 - __builtin_clz(mup); u128 m2 = factor(exp, pow2); if (m2 != m) { printf("\nFAIL: %u %llu\n", exp, k); return false; } } } else { u32 exp = (u32) atol(argv[1]); int startPow2 = (argc >= 3) ? atoi(argv[2]) : 65; initExponent(exp); u128 m = factor(exp, startPow2); if (m != 0) { printf("m: 0x%016llx%016llx\n", (u64) (m >> 64), (u64) m); } } return true; } int main(int argc, char **argv) { time(); assert(argc > 0); assert(NPRIMES % 1024 == 0); // cudaSetDevice(1); cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); CUDA_CHECK; cudaStreamCreate(&stream); cudaHostAlloc((void **) &hostFactor, sizeof(U3), cudaHostAllocDefault); cudaHostAlloc((void **) &hostN, sizeof(u32), cudaHostAllocDefault); cudaGetSymbolAddress((void **)&kTabSizeHost, kTabSize); cudaDeviceSynchronize(); CUDA_CHECK; time("host alloc"); U3 a{0, 0, 1}; U3 b{1, 0, 0}; trysub<<<1, 1>>>(a, b); cudaDeviceSynchronize(); cudaMemcpyFromSymbol(hostFactor, foundFactor, sizeof(U3), 0); // print("x", *hostFactor); run(argc, argv); // Clean-up before exit cudaDeviceSynchronize(); cudaFreeHost(hostFactor); cudaFreeHost(hostN); cudaStreamDestroy(stream); cudaDeviceReset(); }
65c2fdba2c7c8e02e333f1a19ee9945a672c23b8.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <glm/gtc/matrix_transform.hpp> #include <glm/gtc/matrix_inverse.hpp> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) inline int ilog2(int x) { int lg = 0; while (x >>= 1) { ++lg; } return lg; } inline int ilog2ceil(int x) { return ilog2(x - 1) + 1; } void checkCUDAErrorFn(const char *msg, const char *file, int line) { hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene *hst_scene = NULL; static glm::vec3 *dev_image = NULL; static Geom *dev_geoms = NULL; static Material *dev_mats = NULL; static Ray *dev_rayArray = NULL; int* dev_bools; int* dev_indices; // TODO: static variables for device memory, scene/camera info, etc // ... void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; const Geom *geoms = &(hst_scene->geoms)[0]; const Material *mats = &(hst_scene->materials)[0]; hipMalloc(&dev_geoms, pixelcount * sizeof(Geom)); hipMalloc(&dev_mats, pixelcount * sizeof(Material)); hipMalloc(&dev_rayArray, pixelcount * sizeof(Ray)); hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); hipMemset(dev_rayArray, 0, pixelcount * sizeof(Ray)); hipMemcpy(dev_mats, mats, hst_scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice); // TODO: initialize the above static variables added above checkCUDAError("pathtraceInit"); } void pathtraceFree() { hipFree(dev_image); // no-op if dev_image is null // TODO: clean up the above static variables hipFree(dev_geoms); hipFree(dev_mats); hipFree(dev_rayArray); checkCUDAError("pathtraceFree"); } /** * Example function to generate static and test the CUDA-GL interop. * Delete this once you're done looking at it! 
*/ __global__ void generateNoiseDeleteMe(Camera cam, int iter, glm::vec3 *image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0); thrust::uniform_real_distribution<float> u01(0, 1); // CHECKITOUT: Note that on every iteration, noise gets added onto // the image (not replaced). As a result, the image smooths out over // time, since the output image is the contents of this array divided // by the number of iterations. // // Your renderer will do the same thing, and, over time, it will become // smoother. image[index] += glm::vec3(u01(rng)); } } //Create ray to be shot at a pixel in the image __global__ void kernRayGenerate(Camera cam, Ray *ray, int iter, bool dof){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y*cam.resolution.x); thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0); thrust::uniform_real_distribution<float> unitDistrib(-.5f, .5f); thrust::uniform_real_distribution<float> dofDistrib(-1.0f, 1.0f); //Calculate camera's world position if (x < cam.resolution.x && y < cam.resolution.y) { glm::vec3 A = glm::cross(cam.view, cam.up); glm::vec3 B = glm::cross(A, cam.view); glm::vec3 M = cam.position + cam.view; float lenC = glm::length(cam.view); float lenA = glm::length(A); float lenB = glm::length(B); float tantheta = (float)cam.resolution.x; tantheta /= (float)cam.resolution.y; tantheta *= tan((float)glm::radians(cam.fov[1])); glm::vec3 H = (A*lenC*tantheta) / lenA; glm::vec3 V = (B*lenC*tan((float)glm::radians(cam.fov[1]))) / lenB; //Create ray with direction and origin //Jitter rays with uniform distribution //printf("%f ", unitDistrib(rng)); float sx = ((float)x + unitDistrib(rng)) / ((float)cam.resolution.x - 1.0f); float sy = ((float)y + unitDistrib(rng)) / ((float)cam.resolution.y - 1.0f); //Get world coordinates of pixel glm::vec3 WC = M - (2.0f*sx - 1.0f)*H - (2.0f*sy - 1.0f)*V; //Get direction of ray glm::vec3 dir = glm::normalize(WC - cam.position); ray[index].origin = cam.position; ray[index].direction = dir; ray[index].color = glm::vec3(1.0, 1.0, 1.0); ray[index].index = index; ray[index].terminated = false; ray[index].out = true; if (dof == true) { glm::vec3 apOff = glm::vec3(dofDistrib(rng), dofDistrib(rng), 0.0f); glm::vec3 new_E = cam.position + apOff; float focal = 11.5866f; //glm::length(glm::vec3(-2.0f, 5.0f,2.0f) - new_E); dir *= focal; dir -= apOff; dir = glm::normalize(dir); ray[index].origin = new_E; ray[index].direction = dir; } } } //Helper function to get random point on cubic light __device__ glm::vec3 getRandomPointOnCube(Geom node, int iter, int index) { // TODO: get the dimensions of the transformed cube in world space glm::vec3 dim(0.0f, 0.0f, 0.0f); dim = node.scale; // Get surface area of the cube float side1 = dim[0] * dim[1]; // x-y float side2 = dim[1] * dim[2]; // y-z float side3 = dim[0] * dim[2]; // x-z float totalArea = 2.0f * (side1 + side2 + side3); thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0); thrust::uniform_real_distribution<float> unitDistrib(-.5f, .5f); thrust::uniform_real_distribution<float> dofDistrib(0.0f, 1.0f); // pick random face weighted by surface area float r = floor(dofDistrib(rng)); // pick 2 random components for the point in the range (-0.5, 0.5) float c1 = unitDistrib(rng); float c2 = 
unitDistrib(rng); glm::vec3 point; if (r < side1 / totalArea) { // x-y front point = glm::vec3(c1, c2, 0.5f); } else if (r < (side1 * 2) / totalArea) { // x-y back point = glm::vec3(c1, c2, -0.5f); } else if (r < (side1 * 2 + side2) / totalArea) { // y-z front point = glm::vec3(0.5f, c1, c2); } else if (r < (side1 * 2 + side2 * 2) / totalArea) { // y-z back point = glm::vec3(-0.5f, c1, c2); } else if (r < (side1 * 2 + side2 * 2 + side3) / totalArea) { // x-z front point = glm::vec3(c1, 0.5f, c2); } else { // x-z back point = glm::vec3(c1, -0.5f, c2); } // TODO: transform point to world space glm::mat4 T(1.0f); T = glm::translate(T, node.translation); if (node.rotation[0] != 0){ T = glm::rotate(T, node.rotation[0]*(PI/180.0f), glm::vec3(1,0,0)); } if (node.rotation[1] != 0){ T = glm::rotate(T, node.rotation[1]*(PI/180.0f), glm::vec3(0,1,0)); } if (node.rotation[2] != 0){ T = glm::rotate(T, node.rotation[2]*(PI/180.0f), glm::vec3(0,0,1)); } //T = glm::scale(T, node.scale); glm::vec4 newPoint = T*glm::vec4(point, 1.0f); point = glm::vec3(newPoint[0], newPoint[1], newPoint[2]); return point; } //Helper function to get random point on spherical light /*__device__ glm::vec3 getRandomPointOnSphere(Geom node, int iter, int index) { // generate u, v, in the range (0, 1) float u = static_cast <float> (rand()) / static_cast <float> (RAND_MAX); float v = static_cast <float> (rand()) / static_cast <float> (RAND_MAX); float theta = 2.0f * PI * u; float phi = acos(2.0f * v - 1.0f); // find x, y, z coordinates assuming unit sphere in object space glm::vec3 point; point[0] = sin(phi) * cos(theta); point[1] = sin(phi) * sin(theta); point[2] = cos(phi); // TODO: transform point to world space glm::mat4 T(1.0f); T = glm::translate(T, node.translation); if (node.rotation[0] != 0){ T = glm::rotate(T, node.rotation[0]*(PI/180.0f), glm::vec3(1,0,0)); } if (node.rotation[1] != 0){ T = glm::rotate(T, node.rotation[1]*(PI/180.0f), glm::vec3(0,1,0)); } if (node.rotation[2] != 0){ T = glm::rotate(T, node.rotation[2]*(PI/180.0f), glm::vec3(0,0,1)); } glm::vec4 newPoint = T*glm::vec4(point, 1.0f); point = glm::vec3(newPoint[0], newPoint[1], newPoint[2]); return point; }*/ //Helper function to find closest intersection __device__ float closestIntersection(Ray ray, const Geom* geoms, glm::vec3 &intersectionPoint, glm::vec3 &normal, bool &outside, int &objIndex, const int numGeoms){ glm::vec3 interPoint; glm::vec3 norm; bool out; float t = -1; float dist; for (int i = 0; i < numGeoms; i++) { if (geoms[i].type == CUBE) { dist = boxIntersectionTest(geoms[i], ray, interPoint, norm, out); } else if (geoms[i].type == SPHERE) { dist = sphereIntersectionTest(geoms[i], ray, interPoint, norm, out); } if ((dist != -1 && dist < t) || t == -1) { t = dist; intersectionPoint = interPoint; normal = norm; outside = out; objIndex = i; } } return t; } //Function to find next ray __global__ void kernPathTracer(Camera cam, Ray* rayArray, const Geom* geoms, const Material* mats, const int numGeoms, const int numMats, glm::vec3* dev_image, int iter, int depth, int traceDepth, bool m_blur, int size){ //int x = (blockIdx.x * blockDim.x) + threadIdx.x; //int y = (blockIdx.y * blockDim.y) + threadIdx.y; //int index = x + (y * cam.resolution.x); int index = (blockIdx.x * blockDim.x) + threadIdx.x; int imageSize = (cam.resolution.x * cam.resolution.y); //find closest intersection /*if (rayArray[index].terminated == true && index < size) { dev_image[rayArray[index].index] = glm::vec3(1.0, 0.0, 0.0); printf("in here: %i, %i, %i \n", size, index, 
rayArray[index].index); return; } else { //printf("is fine: %i, %i, %i \n", depth, index, rayArray[index].index); }*/ if (index < size && rayArray[index].terminated == false) {//rayArray[index].index < imageSize && index < size && rayArray[index].terminated == false) {//x < cam.resolution.x && y < cam.resolution.y && rayArray[index].terminated == false) { thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, depth); glm::vec3 interPoint; glm::vec3 norm; bool out; int objIndex; if (depth == traceDepth) { dev_image[rayArray[index].index] == glm::vec3(0.0f, 0.0f, 0.0f); return; /*for (int i = 0; i < numGeoms; i++) { if (mats[geoms[i].materialid].emittance > 0 && mats[geoms[rayArray[index].geomid].materialid].emittance == 0 && mats[geoms[rayArray[index].geomid].materialid].hasReflective == 0 && mats[geoms[rayArray[index].geomid].materialid].hasRefractive == 0) { glm::vec3 new_pt = getRandomPointOnCube(geoms[i], iter, index); rayArray[index].direction = rayArray[index].origin + glm::normalize(new_pt - rayArray[index].origin); float t = closestIntersection(rayArray[index], geoms, interPoint, norm, out, objIndex, numGeoms); if (objIndex == i) { printf("hit light in direct"); rayArray[index].color *= mats[geoms[i].materialid].emittance*mats[geoms[objIndex].materialid].color; dev_image[index] += rayArray[index].color; } } }*/ } //Geom* m_blur_geoms = new Geom[numGeoms]; float t; if (m_blur) { /*for (int i = 0; i < numGeoms; i++) { m_blur_geoms[i] = geoms[i]; m_blur_geoms[i].translation.x += m_blur_geoms[i].move.x*rayArray[index].time; m_blur_geoms[i].translation.y += m_blur_geoms[i].move.y*rayArray[index].time; m_blur_geoms[i].translation.z += m_blur_geoms[i].move.z*rayArray[index].time; }*/ t = closestIntersection(rayArray[index], geoms, interPoint, norm, out, objIndex, numGeoms); } else { t = closestIntersection(rayArray[index], geoms, interPoint, norm, out, objIndex, numGeoms); } rayArray[index].geomid = objIndex; //get direction of next ray and compute new color if (t >= 0.0f) { if (mats[geoms[objIndex].materialid].emittance >= 1) { rayArray[index].color *= mats[geoms[objIndex].materialid].emittance*mats[geoms[objIndex].materialid].color; dev_image[rayArray[index].index] += rayArray[index].color; rayArray[index].terminated = true; } else { scatterRay(rayArray[index], rayArray[index].color, interPoint, norm, mats[geoms[objIndex].materialid], out, rng); } } else { //dev_image[index] *= glm::vec3(0.0f, 0.0f, 0.0f); //rayArray[index].color; rayArray[index].terminated = true; } } } __global__ void kernCombine(int *maxArray, int *newData, int n) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < n) { //printf("index: %i max: %i \n", g_idata[index], maxArray[blockIdx.x]); newData[index] = newData[index] + maxArray[blockIdx.x]; } } __global__ void kernScan(int *maxArray, int *g_idata, int n) { //printf("IN THIN FUNCTION AELFHGGGGHF"); extern __shared__ int temp[]; //printf("blockId: %i", blockDim.x); int thid = threadIdx.x + (blockIdx.x * blockDim.x); int t = threadIdx.x; int offset = 1; temp[2*t] = g_idata[2*thid]; temp[2*t+1] = g_idata[2*thid+1]; for (int d = (2*blockDim.x)>>1; d > 0; d >>=1) { __syncthreads(); if (t < d) { int ai = offset*(2*t+1)-1; int bi = offset*(2*t+2)-1; temp[bi] += temp[ai]; } offset *= 2; } if (t == 0) { temp[2*blockDim.x-1] = 0; } for (int d = 1; d < (2*blockDim.x); d*=2) { offset >>= 1; __syncthreads(); if (t < d) { int ai = offset *(2*t+1)-1; int bi = offset *(2*t+2)-1; float t2 = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t2; } } 
__syncthreads(); if (t == (blockDim.x - 1)) { maxArray[blockIdx.x] = temp[2*t+1] + g_idata[2*thid+1]; } g_idata[2*thid] = temp[2*t]; //printf("(%i, %i) \n", thid, g_idata[2*thid]); g_idata[2*thid+1] = temp[2*t+1]; } void scan(int n, const int *idata, int *odata, int newSize) { int blockSize = 32; int numBlocks = ceil((float)n / (float)blockSize); int powTwo = 1<<ilog2ceil(n); dim3 fullBlocksPerGrid(((powTwo/2) + blockSize - 1) / blockSize); int* maxArray; int* newArray; int randomNum = 0; int* g_idata; hipMalloc((void**)&g_idata, powTwo * sizeof(int)); checkCUDAError("pathtrace"); hipMalloc((void**)&newArray, powTwo * sizeof(int)); checkCUDAError("pathtrace"); hipMemset(newArray, 0, powTwo * sizeof(int)); checkCUDAError("pathtrace"); newSize = n; int* scanArray = new int[n]; //scanArray[0] = 0; for (int i = 0; i < n; i++) { scanArray[i] = idata[i]; //printf("bool %i: %i \n", i, scanArray[i]); } hipMalloc((void**)&maxArray, (((powTwo/2) + blockSize - 1) / blockSize) * sizeof(int)); checkCUDAError("pathtrace"); hipMemcpy(newArray, scanArray, n*sizeof(int), hipMemcpyHostToDevice); //printf("fullblocks: %i \n", ((powTwo/2) + blockSize - 1) / blockSize); hipDeviceSynchronize(); checkCUDAError("pathtrace"); hipLaunchKernelGGL(( kernScan), dim3(fullBlocksPerGrid), dim3(blockSize), 2*blockSize*sizeof(int), 0, maxArray, newArray, n); checkCUDAError("pathtrace"); hipDeviceSynchronize(); hipMemcpy(odata, newArray, n*sizeof(int), hipMemcpyDeviceToHost); int maxSize = ((powTwo/2) + blockSize - 1) / blockSize; if (maxSize != 1) { int* hst_maxArray = new int[maxSize]; int* scanMax = new int[maxSize]; int* dev_scanMax; hipMalloc((void**)&dev_scanMax, maxSize*sizeof(int)); hipMemcpy(hst_maxArray, maxArray, maxSize*sizeof(int), hipMemcpyDeviceToHost); //printf("%i ", hst_maxArray[maxSize - 1]); scan(maxSize, hst_maxArray, scanMax, randomNum); //hipMemcpy(odata, g_idata, n*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(dev_scanMax, scanMax, maxSize*sizeof(int), hipMemcpyHostToDevice); //kernCombine<<<fullBlocksPerGrid, blockSize>>>(dev_scanMax, g_idata, n); //hipMemcpy(odata, g_idata, n*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(newArray, odata, n*sizeof(int), hipMemcpyHostToDevice); /*for (int i = 0; i < maxSize; i++) { for (int j = blockSize*2*i; j < blockSize*2*(i+1)) { odata[] } }*/ hipLaunchKernelGGL(( kernCombine), dim3(fullBlocksPerGrid), dim3(blockSize*2), 0, 0, dev_scanMax, newArray, n); //checkCUDAError("pathtrace"); hipMemcpy(odata, newArray, n*sizeof(int), hipMemcpyDeviceToHost); newSize = hst_maxArray[maxSize - 1]; hipFree(dev_scanMax); } //printf(" don with function "); hipFree(maxArray); hipFree(g_idata); hipFree(newArray); //checkCUDAError("pathtrace"); } __global__ void kernScatter(int n, Ray *odata, const Ray *idata, const int *bools, const int *indices) { int thrId = threadIdx.x + (blockIdx.x * blockDim.x); if (thrId < n) { //printf("BOOLS: %i \n", bools[thrId]); if (bools[thrId] == 1) { //printf("old index: %i , new index: %i \n", thrId, indices[thrId]); odata[indices[thrId]] = idata[thrId]; //printf("old ray index: %i, new ray index: %i \n", idata[thrId].index, odata[indices[thrId]].index); } } } /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { //printf("iter: %i \n", iter); const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; const Geom *geoms = &(hst_scene->geoms)[0]; 
Geom *m_blur_geoms = &(hst_scene->geoms)[0]; int numGeoms = hst_scene->geoms.size(); int numMats = hst_scene->materials.size(); Ray *rayArray = new Ray[pixelcount]; int max_iter = 1000; //hst_scene->state.iterations; const int blockSideLength = 8; const dim3 blockSize(blockSideLength, blockSideLength); checkCUDAError("pathtrace"); const dim3 blocksPerGrid( (cam.resolution.x + blockSize.x - 1) / blockSize.x, (cam.resolution.y + blockSize.y - 1) / blockSize.y); //const dim3 blocksPerGrid((pixelcount + blockSize - 1) / blockSize); /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray is a (ray, color) pair, where color starts as the // multiplicative identity, white = (1, 1, 1). // * For debugging, you can output your ray directions as colors. // * For each depth: // * Compute one new (ray, color) pair along each path (using scatterRay). // Note that many rays will terminate by hitting a light or hitting // nothing at all. You'll have to decide how to represent your path rays // and how you'll mark terminated rays. // * Color is attenuated (multiplied) by reflections off of any object // surface. // * You can debug your ray-scene intersections by displaying various // values as colors, e.g., the first surface normal, the first bounced // ray direction, the first unlit material color, etc. // * Add all of the terminated rays' results into the appropriate pixels. // * Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Finally, handle all of the paths that still haven't terminated. // (Easy way is to make them black or background-colored.) 
// TODO: perform one iteration of path tracing bool dof = true; bool m_blur = false; bool streamCompaction = true; int size = pixelcount; if (m_blur && iter < max_iter) { for (int i = 0; i < numGeoms; i++) { m_blur_geoms[i] = geoms[i]; m_blur_geoms[i].translation.x += geoms[i].move.x / (float)max_iter; m_blur_geoms[i].translation.y += geoms[i].move.y / (float)max_iter; m_blur_geoms[i].translation.z += geoms[i].move.z / (float)max_iter; m_blur_geoms[i].transform = utilityCore::buildTransformationMatrix(m_blur_geoms[i].translation, m_blur_geoms[i].rotation, m_blur_geoms[i].scale); m_blur_geoms[i].inverseTransform = glm::inverse(m_blur_geoms[i].transform); m_blur_geoms[i].invTranspose = glm::inverseTranspose(m_blur_geoms[i].transform); //printf("(%f, %f, %f)", m_blur_geoms[i].translation.x, m_blur_geoms[i].translation.y, m_blur_geoms[i].translation.z); } hipMemcpy(dev_geoms, m_blur_geoms, hst_scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice); checkCUDAError("pathtrace"); } else { hipMemcpy(dev_geoms, geoms, hst_scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice); } checkCUDAError("pathtrace"); int newSize = pixelcount; //printf("Heyo starting over \n newSize: %i \n", newSize); Ray* dev_rayShort; hipMalloc((void**)&dev_rayShort, pixelcount * sizeof(Ray)); checkCUDAError("pathtrace"); hipMalloc((void**)&dev_rayArray, pixelcount * sizeof(Ray)); checkCUDAError("pathtrace"); hipLaunchKernelGGL(( kernRayGenerate), dim3(blocksPerGrid), dim3(blockSize), 0, 0, cam, dev_rayArray, iter, dof); checkCUDAError("pathtrace"); //cuda events hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); checkCUDAError("pathtrace"); for (int i = 0; i < traceDepth + 1; i++) { int* hst_bool = new int[newSize]; int* dev_bool; int* dev_indices; int* hst_indices = new int[newSize]; int blockSizeNew = 64; dim3 blocksGridNew((newSize + blockSizeNew - 1) / blockSizeNew); checkCUDAError("pathtrace"); //printf("BlocksPerGrid: %i \n", (newSize + blockSizeNew - 1) / blockSizeNew); if (newSize == 0) { hipEventRecord(stop); break; } //printf("size: %i \n", newSize); hipLaunchKernelGGL(( kernPathTracer), dim3(blocksGridNew), dim3(blockSizeNew), 0, 0, cam, dev_rayArray, dev_geoms, dev_mats, numGeoms, numMats, dev_image, iter, i, traceDepth, m_blur, newSize); checkCUDAError("pathtrace"); hipMemcpy(rayArray, dev_rayArray, pixelcount*sizeof(Ray), hipMemcpyDeviceToHost); checkCUDAError("pathtrace"); if (streamCompaction) { /* for (int m = 0; m < newSize; m++) { float ran = rand()%100; if (ran < 50) { hst_bool[m] = 0; //printf("%i: %i \n", m, hst_bool[m]); } else { hst_bool[m] = 1; //printf("%i: %i \n", m, hst_bool[m]); } } if (newSize > 1) { scan(newSize, hst_bool, hst_indices, newSize); newSize = hst_bool[newSize - 1] + hst_indices[newSize - 1]; }*/ for (int m = 0; m < newSize; m++) { if (rayArray[m].terminated) { hst_bool[m] = 0; //printf("%i: %i \n", m, hst_bool[m]); } else { hst_bool[m] = 1; //printf("%i: %i \n", m, hst_bool[m]); //printf("%i: %i \n", m, hst_bool[m]); } } int oldSize = newSize; if (newSize > 1) { //printf("old Size: %i \n", oldSize ); scan(oldSize, hst_bool, hst_indices, newSize); newSize = hst_bool[oldSize - 1] + hst_indices[oldSize - 1]; //printf("new Size: %i \n", newSize ); hipMalloc((void**)&dev_bool, oldSize * sizeof(int)); hipMalloc((void**)&dev_indices, oldSize * sizeof(int)); hipMemcpy(dev_bool, hst_bool, oldSize*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_indices, hst_indices, oldSize*sizeof(int), hipMemcpyHostToDevice); 
checkCUDAError("pathtrace"); //printf("%i, %i, %i \n", oldSize, blocksGridNew, blockSizeNew); hipLaunchKernelGGL(( kernScatter), dim3(blocksGridNew), dim3(blockSizeNew), 0, 0, oldSize, dev_rayShort, dev_rayArray, dev_bool, dev_indices); checkCUDAError("pathtrace"); //printf("newSize: %i \n", newSize); //hipMemcpy(rayArray, dev_rayShort, newSize*sizeof(Ray), hipMemcpyDeviceToHost); hipMemcpy(dev_rayArray, dev_rayShort, newSize*sizeof(Ray), hipMemcpyDeviceToDevice); checkCUDAError("pathtrace"); hipFree(dev_bool); hipFree(dev_indices); } int k; //std::cin >> k; } } hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); //printf("time per iteration: %f \n", milliseconds); checkCUDAError("pathtrace"); hipMemcpy(rayArray, dev_rayArray, pixelcount*sizeof(Ray), hipMemcpyDeviceToHost); checkCUDAError("pathtrace"); //generateNoiseDeleteMe<<<blocksPerGrid, blockSize>>>(cam, iter, dev_image); /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid), dim3(blockSize), 0, 0, pbo, cam.resolution, iter, dev_image); checkCUDAError("pathtrace"); // Retrieve image from GPU hipMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost); hipFree(dev_rayShort); hipFree(dev_rayArray); checkCUDAError("pathtrace"); }
65c2fdba2c7c8e02e333f1a19ee9945a672c23b8.cu
#include <cstdio> #include <cuda.h> #include <cuda_runtime.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <glm/gtc/matrix_transform.hpp> #include <glm/gtc/matrix_inverse.hpp> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) inline int ilog2(int x) { int lg = 0; while (x >>= 1) { ++lg; } return lg; } inline int ilog2ceil(int x) { return ilog2(x - 1) + 1; } void checkCUDAErrorFn(const char *msg, const char *file, int line) { cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene *hst_scene = NULL; static glm::vec3 *dev_image = NULL; static Geom *dev_geoms = NULL; static Material *dev_mats = NULL; static Ray *dev_rayArray = NULL; int* dev_bools; int* dev_indices; // TODO: static variables for device memory, scene/camera info, etc // ... void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; const Geom *geoms = &(hst_scene->geoms)[0]; const Material *mats = &(hst_scene->materials)[0]; cudaMalloc(&dev_geoms, pixelcount * sizeof(Geom)); cudaMalloc(&dev_mats, pixelcount * sizeof(Material)); cudaMalloc(&dev_rayArray, pixelcount * sizeof(Ray)); cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); cudaMemset(dev_rayArray, 0, pixelcount * sizeof(Ray)); cudaMemcpy(dev_mats, mats, hst_scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice); // TODO: initialize the above static variables added above checkCUDAError("pathtraceInit"); } void pathtraceFree() { cudaFree(dev_image); // no-op if dev_image is null // TODO: clean up the above static variables cudaFree(dev_geoms); cudaFree(dev_mats); cudaFree(dev_rayArray); checkCUDAError("pathtraceFree"); } /** * Example function to generate static and test the CUDA-GL interop. * Delete this once you're done looking at it! 
*/ __global__ void generateNoiseDeleteMe(Camera cam, int iter, glm::vec3 *image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0); thrust::uniform_real_distribution<float> u01(0, 1); // CHECKITOUT: Note that on every iteration, noise gets added onto // the image (not replaced). As a result, the image smooths out over // time, since the output image is the contents of this array divided // by the number of iterations. // // Your renderer will do the same thing, and, over time, it will become // smoother. image[index] += glm::vec3(u01(rng)); } } //Create ray to be shot at a pixel in the image __global__ void kernRayGenerate(Camera cam, Ray *ray, int iter, bool dof){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y*cam.resolution.x); thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0); thrust::uniform_real_distribution<float> unitDistrib(-.5f, .5f); thrust::uniform_real_distribution<float> dofDistrib(-1.0f, 1.0f); //Calculate camera's world position if (x < cam.resolution.x && y < cam.resolution.y) { glm::vec3 A = glm::cross(cam.view, cam.up); glm::vec3 B = glm::cross(A, cam.view); glm::vec3 M = cam.position + cam.view; float lenC = glm::length(cam.view); float lenA = glm::length(A); float lenB = glm::length(B); float tantheta = (float)cam.resolution.x; tantheta /= (float)cam.resolution.y; tantheta *= tan((float)glm::radians(cam.fov[1])); glm::vec3 H = (A*lenC*tantheta) / lenA; glm::vec3 V = (B*lenC*tan((float)glm::radians(cam.fov[1]))) / lenB; //Create ray with direction and origin //Jitter rays with uniform distribution //printf("%f ", unitDistrib(rng)); float sx = ((float)x + unitDistrib(rng)) / ((float)cam.resolution.x - 1.0f); float sy = ((float)y + unitDistrib(rng)) / ((float)cam.resolution.y - 1.0f); //Get world coordinates of pixel glm::vec3 WC = M - (2.0f*sx - 1.0f)*H - (2.0f*sy - 1.0f)*V; //Get direction of ray glm::vec3 dir = glm::normalize(WC - cam.position); ray[index].origin = cam.position; ray[index].direction = dir; ray[index].color = glm::vec3(1.0, 1.0, 1.0); ray[index].index = index; ray[index].terminated = false; ray[index].out = true; if (dof == true) { glm::vec3 apOff = glm::vec3(dofDistrib(rng), dofDistrib(rng), 0.0f); glm::vec3 new_E = cam.position + apOff; float focal = 11.5866f; //glm::length(glm::vec3(-2.0f, 5.0f,2.0f) - new_E); dir *= focal; dir -= apOff; dir = glm::normalize(dir); ray[index].origin = new_E; ray[index].direction = dir; } } } //Helper function to get random point on cubic light __device__ glm::vec3 getRandomPointOnCube(Geom node, int iter, int index) { // TODO: get the dimensions of the transformed cube in world space glm::vec3 dim(0.0f, 0.0f, 0.0f); dim = node.scale; // Get surface area of the cube float side1 = dim[0] * dim[1]; // x-y float side2 = dim[1] * dim[2]; // y-z float side3 = dim[0] * dim[2]; // x-z float totalArea = 2.0f * (side1 + side2 + side3); thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0); thrust::uniform_real_distribution<float> unitDistrib(-.5f, .5f); thrust::uniform_real_distribution<float> dofDistrib(0.0f, 1.0f); // pick random face weighted by surface area float r = floor(dofDistrib(rng)); // pick 2 random components for the point in the range (-0.5, 0.5) float c1 = unitDistrib(rng); float c2 = 
unitDistrib(rng); glm::vec3 point; if (r < side1 / totalArea) { // x-y front point = glm::vec3(c1, c2, 0.5f); } else if (r < (side1 * 2) / totalArea) { // x-y back point = glm::vec3(c1, c2, -0.5f); } else if (r < (side1 * 2 + side2) / totalArea) { // y-z front point = glm::vec3(0.5f, c1, c2); } else if (r < (side1 * 2 + side2 * 2) / totalArea) { // y-z back point = glm::vec3(-0.5f, c1, c2); } else if (r < (side1 * 2 + side2 * 2 + side3) / totalArea) { // x-z front point = glm::vec3(c1, 0.5f, c2); } else { // x-z back point = glm::vec3(c1, -0.5f, c2); } // TODO: transform point to world space glm::mat4 T(1.0f); T = glm::translate(T, node.translation); if (node.rotation[0] != 0){ T = glm::rotate(T, node.rotation[0]*(PI/180.0f), glm::vec3(1,0,0)); } if (node.rotation[1] != 0){ T = glm::rotate(T, node.rotation[1]*(PI/180.0f), glm::vec3(0,1,0)); } if (node.rotation[2] != 0){ T = glm::rotate(T, node.rotation[2]*(PI/180.0f), glm::vec3(0,0,1)); } //T = glm::scale(T, node.scale); glm::vec4 newPoint = T*glm::vec4(point, 1.0f); point = glm::vec3(newPoint[0], newPoint[1], newPoint[2]); return point; } //Helper function to get random point on spherical light /*__device__ glm::vec3 getRandomPointOnSphere(Geom node, int iter, int index) { // generate u, v, in the range (0, 1) float u = static_cast <float> (rand()) / static_cast <float> (RAND_MAX); float v = static_cast <float> (rand()) / static_cast <float> (RAND_MAX); float theta = 2.0f * PI * u; float phi = acos(2.0f * v - 1.0f); // find x, y, z coordinates assuming unit sphere in object space glm::vec3 point; point[0] = sin(phi) * cos(theta); point[1] = sin(phi) * sin(theta); point[2] = cos(phi); // TODO: transform point to world space glm::mat4 T(1.0f); T = glm::translate(T, node.translation); if (node.rotation[0] != 0){ T = glm::rotate(T, node.rotation[0]*(PI/180.0f), glm::vec3(1,0,0)); } if (node.rotation[1] != 0){ T = glm::rotate(T, node.rotation[1]*(PI/180.0f), glm::vec3(0,1,0)); } if (node.rotation[2] != 0){ T = glm::rotate(T, node.rotation[2]*(PI/180.0f), glm::vec3(0,0,1)); } glm::vec4 newPoint = T*glm::vec4(point, 1.0f); point = glm::vec3(newPoint[0], newPoint[1], newPoint[2]); return point; }*/ //Helper function to find closest intersection __device__ float closestIntersection(Ray ray, const Geom* geoms, glm::vec3 &intersectionPoint, glm::vec3 &normal, bool &outside, int &objIndex, const int numGeoms){ glm::vec3 interPoint; glm::vec3 norm; bool out; float t = -1; float dist; for (int i = 0; i < numGeoms; i++) { if (geoms[i].type == CUBE) { dist = boxIntersectionTest(geoms[i], ray, interPoint, norm, out); } else if (geoms[i].type == SPHERE) { dist = sphereIntersectionTest(geoms[i], ray, interPoint, norm, out); } if ((dist != -1 && dist < t) || t == -1) { t = dist; intersectionPoint = interPoint; normal = norm; outside = out; objIndex = i; } } return t; } //Function to find next ray __global__ void kernPathTracer(Camera cam, Ray* rayArray, const Geom* geoms, const Material* mats, const int numGeoms, const int numMats, glm::vec3* dev_image, int iter, int depth, int traceDepth, bool m_blur, int size){ //int x = (blockIdx.x * blockDim.x) + threadIdx.x; //int y = (blockIdx.y * blockDim.y) + threadIdx.y; //int index = x + (y * cam.resolution.x); int index = (blockIdx.x * blockDim.x) + threadIdx.x; int imageSize = (cam.resolution.x * cam.resolution.y); //find closest intersection /*if (rayArray[index].terminated == true && index < size) { dev_image[rayArray[index].index] = glm::vec3(1.0, 0.0, 0.0); printf("in here: %i, %i, %i \n", size, index, 
rayArray[index].index); return; } else { //printf("is fine: %i, %i, %i \n", depth, index, rayArray[index].index); }*/ if (index < size && rayArray[index].terminated == false) {//rayArray[index].index < imageSize && index < size && rayArray[index].terminated == false) {//x < cam.resolution.x && y < cam.resolution.y && rayArray[index].terminated == false) { thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, depth); glm::vec3 interPoint; glm::vec3 norm; bool out; int objIndex; if (depth == traceDepth) { dev_image[rayArray[index].index] == glm::vec3(0.0f, 0.0f, 0.0f); return; /*for (int i = 0; i < numGeoms; i++) { if (mats[geoms[i].materialid].emittance > 0 && mats[geoms[rayArray[index].geomid].materialid].emittance == 0 && mats[geoms[rayArray[index].geomid].materialid].hasReflective == 0 && mats[geoms[rayArray[index].geomid].materialid].hasRefractive == 0) { glm::vec3 new_pt = getRandomPointOnCube(geoms[i], iter, index); rayArray[index].direction = rayArray[index].origin + glm::normalize(new_pt - rayArray[index].origin); float t = closestIntersection(rayArray[index], geoms, interPoint, norm, out, objIndex, numGeoms); if (objIndex == i) { printf("hit light in direct"); rayArray[index].color *= mats[geoms[i].materialid].emittance*mats[geoms[objIndex].materialid].color; dev_image[index] += rayArray[index].color; } } }*/ } //Geom* m_blur_geoms = new Geom[numGeoms]; float t; if (m_blur) { /*for (int i = 0; i < numGeoms; i++) { m_blur_geoms[i] = geoms[i]; m_blur_geoms[i].translation.x += m_blur_geoms[i].move.x*rayArray[index].time; m_blur_geoms[i].translation.y += m_blur_geoms[i].move.y*rayArray[index].time; m_blur_geoms[i].translation.z += m_blur_geoms[i].move.z*rayArray[index].time; }*/ t = closestIntersection(rayArray[index], geoms, interPoint, norm, out, objIndex, numGeoms); } else { t = closestIntersection(rayArray[index], geoms, interPoint, norm, out, objIndex, numGeoms); } rayArray[index].geomid = objIndex; //get direction of next ray and compute new color if (t >= 0.0f) { if (mats[geoms[objIndex].materialid].emittance >= 1) { rayArray[index].color *= mats[geoms[objIndex].materialid].emittance*mats[geoms[objIndex].materialid].color; dev_image[rayArray[index].index] += rayArray[index].color; rayArray[index].terminated = true; } else { scatterRay(rayArray[index], rayArray[index].color, interPoint, norm, mats[geoms[objIndex].materialid], out, rng); } } else { //dev_image[index] *= glm::vec3(0.0f, 0.0f, 0.0f); //rayArray[index].color; rayArray[index].terminated = true; } } } __global__ void kernCombine(int *maxArray, int *newData, int n) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < n) { //printf("index: %i max: %i \n", g_idata[index], maxArray[blockIdx.x]); newData[index] = newData[index] + maxArray[blockIdx.x]; } } __global__ void kernScan(int *maxArray, int *g_idata, int n) { //printf("IN THIN FUNCTION AELFHGGGGHF"); extern __shared__ int temp[]; //printf("blockId: %i", blockDim.x); int thid = threadIdx.x + (blockIdx.x * blockDim.x); int t = threadIdx.x; int offset = 1; temp[2*t] = g_idata[2*thid]; temp[2*t+1] = g_idata[2*thid+1]; for (int d = (2*blockDim.x)>>1; d > 0; d >>=1) { __syncthreads(); if (t < d) { int ai = offset*(2*t+1)-1; int bi = offset*(2*t+2)-1; temp[bi] += temp[ai]; } offset *= 2; } if (t == 0) { temp[2*blockDim.x-1] = 0; } for (int d = 1; d < (2*blockDim.x); d*=2) { offset >>= 1; __syncthreads(); if (t < d) { int ai = offset *(2*t+1)-1; int bi = offset *(2*t+2)-1; float t2 = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t2; } } 
__syncthreads(); if (t == (blockDim.x - 1)) { maxArray[blockIdx.x] = temp[2*t+1] + g_idata[2*thid+1]; } g_idata[2*thid] = temp[2*t]; //printf("(%i, %i) \n", thid, g_idata[2*thid]); g_idata[2*thid+1] = temp[2*t+1]; } void scan(int n, const int *idata, int *odata, int newSize) { int blockSize = 32; int numBlocks = ceil((float)n / (float)blockSize); int powTwo = 1<<ilog2ceil(n); dim3 fullBlocksPerGrid(((powTwo/2) + blockSize - 1) / blockSize); int* maxArray; int* newArray; int randomNum = 0; int* g_idata; cudaMalloc((void**)&g_idata, powTwo * sizeof(int)); checkCUDAError("pathtrace"); cudaMalloc((void**)&newArray, powTwo * sizeof(int)); checkCUDAError("pathtrace"); cudaMemset(newArray, 0, powTwo * sizeof(int)); checkCUDAError("pathtrace"); newSize = n; int* scanArray = new int[n]; //scanArray[0] = 0; for (int i = 0; i < n; i++) { scanArray[i] = idata[i]; //printf("bool %i: %i \n", i, scanArray[i]); } cudaMalloc((void**)&maxArray, (((powTwo/2) + blockSize - 1) / blockSize) * sizeof(int)); checkCUDAError("pathtrace"); cudaMemcpy(newArray, scanArray, n*sizeof(int), cudaMemcpyHostToDevice); //printf("fullblocks: %i \n", ((powTwo/2) + blockSize - 1) / blockSize); cudaDeviceSynchronize(); checkCUDAError("pathtrace"); kernScan<<<fullBlocksPerGrid, blockSize, 2*blockSize*sizeof(int)>>>(maxArray, newArray, n); checkCUDAError("pathtrace"); cudaDeviceSynchronize(); cudaMemcpy(odata, newArray, n*sizeof(int), cudaMemcpyDeviceToHost); int maxSize = ((powTwo/2) + blockSize - 1) / blockSize; if (maxSize != 1) { int* hst_maxArray = new int[maxSize]; int* scanMax = new int[maxSize]; int* dev_scanMax; cudaMalloc((void**)&dev_scanMax, maxSize*sizeof(int)); cudaMemcpy(hst_maxArray, maxArray, maxSize*sizeof(int), cudaMemcpyDeviceToHost); //printf("%i ", hst_maxArray[maxSize - 1]); scan(maxSize, hst_maxArray, scanMax, randomNum); //cudaMemcpy(odata, g_idata, n*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(dev_scanMax, scanMax, maxSize*sizeof(int), cudaMemcpyHostToDevice); //kernCombine<<<fullBlocksPerGrid, blockSize>>>(dev_scanMax, g_idata, n); //cudaMemcpy(odata, g_idata, n*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(newArray, odata, n*sizeof(int), cudaMemcpyHostToDevice); /*for (int i = 0; i < maxSize; i++) { for (int j = blockSize*2*i; j < blockSize*2*(i+1)) { odata[] } }*/ kernCombine<<<fullBlocksPerGrid, blockSize*2>>>(dev_scanMax, newArray, n); //checkCUDAError("pathtrace"); cudaMemcpy(odata, newArray, n*sizeof(int), cudaMemcpyDeviceToHost); newSize = hst_maxArray[maxSize - 1]; cudaFree(dev_scanMax); } //printf(" don with function "); cudaFree(maxArray); cudaFree(g_idata); cudaFree(newArray); //checkCUDAError("pathtrace"); } __global__ void kernScatter(int n, Ray *odata, const Ray *idata, const int *bools, const int *indices) { int thrId = threadIdx.x + (blockIdx.x * blockDim.x); if (thrId < n) { //printf("BOOLS: %i \n", bools[thrId]); if (bools[thrId] == 1) { //printf("old index: %i , new index: %i \n", thrId, indices[thrId]); odata[indices[thrId]] = idata[thrId]; //printf("old ray index: %i, new ray index: %i \n", idata[thrId].index, odata[indices[thrId]].index); } } } /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { //printf("iter: %i \n", iter); const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; const Geom *geoms = &(hst_scene->geoms)[0]; Geom *m_blur_geoms = &(hst_scene->geoms)[0]; 
int numGeoms = hst_scene->geoms.size(); int numMats = hst_scene->materials.size(); Ray *rayArray = new Ray[pixelcount]; int max_iter = 1000; //hst_scene->state.iterations; const int blockSideLength = 8; const dim3 blockSize(blockSideLength, blockSideLength); checkCUDAError("pathtrace"); const dim3 blocksPerGrid( (cam.resolution.x + blockSize.x - 1) / blockSize.x, (cam.resolution.y + blockSize.y - 1) / blockSize.y); //const dim3 blocksPerGrid((pixelcount + blockSize - 1) / blockSize); /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray is a (ray, color) pair, where color starts as the // multiplicative identity, white = (1, 1, 1). // * For debugging, you can output your ray directions as colors. // * For each depth: // * Compute one new (ray, color) pair along each path (using scatterRay). // Note that many rays will terminate by hitting a light or hitting // nothing at all. You'll have to decide how to represent your path rays // and how you'll mark terminated rays. // * Color is attenuated (multiplied) by reflections off of any object // surface. // * You can debug your ray-scene intersections by displaying various // values as colors, e.g., the first surface normal, the first bounced // ray direction, the first unlit material color, etc. // * Add all of the terminated rays' results into the appropriate pixels. // * Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Finally, handle all of the paths that still haven't terminated. // (Easy way is to make them black or background-colored.) 
// TODO: perform one iteration of path tracing bool dof = true; bool m_blur = false; bool streamCompaction = true; int size = pixelcount; if (m_blur && iter < max_iter) { for (int i = 0; i < numGeoms; i++) { m_blur_geoms[i] = geoms[i]; m_blur_geoms[i].translation.x += geoms[i].move.x / (float)max_iter; m_blur_geoms[i].translation.y += geoms[i].move.y / (float)max_iter; m_blur_geoms[i].translation.z += geoms[i].move.z / (float)max_iter; m_blur_geoms[i].transform = utilityCore::buildTransformationMatrix(m_blur_geoms[i].translation, m_blur_geoms[i].rotation, m_blur_geoms[i].scale); m_blur_geoms[i].inverseTransform = glm::inverse(m_blur_geoms[i].transform); m_blur_geoms[i].invTranspose = glm::inverseTranspose(m_blur_geoms[i].transform); //printf("(%f, %f, %f)", m_blur_geoms[i].translation.x, m_blur_geoms[i].translation.y, m_blur_geoms[i].translation.z); } cudaMemcpy(dev_geoms, m_blur_geoms, hst_scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); checkCUDAError("pathtrace"); } else { cudaMemcpy(dev_geoms, geoms, hst_scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); } checkCUDAError("pathtrace"); int newSize = pixelcount; //printf("Heyo starting over \n newSize: %i \n", newSize); Ray* dev_rayShort; cudaMalloc((void**)&dev_rayShort, pixelcount * sizeof(Ray)); checkCUDAError("pathtrace"); cudaMalloc((void**)&dev_rayArray, pixelcount * sizeof(Ray)); checkCUDAError("pathtrace"); kernRayGenerate<<<blocksPerGrid, blockSize>>>(cam, dev_rayArray, iter, dof); checkCUDAError("pathtrace"); //cuda events cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); checkCUDAError("pathtrace"); for (int i = 0; i < traceDepth + 1; i++) { int* hst_bool = new int[newSize]; int* dev_bool; int* dev_indices; int* hst_indices = new int[newSize]; int blockSizeNew = 64; dim3 blocksGridNew((newSize + blockSizeNew - 1) / blockSizeNew); checkCUDAError("pathtrace"); //printf("BlocksPerGrid: %i \n", (newSize + blockSizeNew - 1) / blockSizeNew); if (newSize == 0) { cudaEventRecord(stop); break; } //printf("size: %i \n", newSize); kernPathTracer<<<blocksGridNew, blockSizeNew>>>(cam, dev_rayArray, dev_geoms, dev_mats, numGeoms, numMats, dev_image, iter, i, traceDepth, m_blur, newSize); checkCUDAError("pathtrace"); cudaMemcpy(rayArray, dev_rayArray, pixelcount*sizeof(Ray), cudaMemcpyDeviceToHost); checkCUDAError("pathtrace"); if (streamCompaction) { /* for (int m = 0; m < newSize; m++) { float ran = rand()%100; if (ran < 50) { hst_bool[m] = 0; //printf("%i: %i \n", m, hst_bool[m]); } else { hst_bool[m] = 1; //printf("%i: %i \n", m, hst_bool[m]); } } if (newSize > 1) { scan(newSize, hst_bool, hst_indices, newSize); newSize = hst_bool[newSize - 1] + hst_indices[newSize - 1]; }*/ for (int m = 0; m < newSize; m++) { if (rayArray[m].terminated) { hst_bool[m] = 0; //printf("%i: %i \n", m, hst_bool[m]); } else { hst_bool[m] = 1; //printf("%i: %i \n", m, hst_bool[m]); //printf("%i: %i \n", m, hst_bool[m]); } } int oldSize = newSize; if (newSize > 1) { //printf("old Size: %i \n", oldSize ); scan(oldSize, hst_bool, hst_indices, newSize); newSize = hst_bool[oldSize - 1] + hst_indices[oldSize - 1]; //printf("new Size: %i \n", newSize ); cudaMalloc((void**)&dev_bool, oldSize * sizeof(int)); cudaMalloc((void**)&dev_indices, oldSize * sizeof(int)); cudaMemcpy(dev_bool, hst_bool, oldSize*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_indices, hst_indices, oldSize*sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("pathtrace"); //printf("%i, %i, %i \n", oldSize, blocksGridNew, 
blockSizeNew); kernScatter<<<blocksGridNew, blockSizeNew>>>(oldSize, dev_rayShort, dev_rayArray, dev_bool, dev_indices); checkCUDAError("pathtrace"); //printf("newSize: %i \n", newSize); //cudaMemcpy(rayArray, dev_rayShort, newSize*sizeof(Ray), cudaMemcpyDeviceToHost); cudaMemcpy(dev_rayArray, dev_rayShort, newSize*sizeof(Ray), cudaMemcpyDeviceToDevice); checkCUDAError("pathtrace"); cudaFree(dev_bool); cudaFree(dev_indices); } int k; //std::cin >> k; } } cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); //printf("time per iteration: %f \n", milliseconds); checkCUDAError("pathtrace"); cudaMemcpy(rayArray, dev_rayArray, pixelcount*sizeof(Ray), cudaMemcpyDeviceToHost); checkCUDAError("pathtrace"); //generateNoiseDeleteMe<<<blocksPerGrid, blockSize>>>(cam, iter, dev_image); /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering sendImageToPBO<<<blocksPerGrid, blockSize>>>(pbo, cam.resolution, iter, dev_image); checkCUDAError("pathtrace"); // Retrieve image from GPU cudaMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost); cudaFree(dev_rayShort); cudaFree(dev_rayArray); checkCUDAError("pathtrace"); }
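
The pathtrace implementation above performs stream compaction by hand: kernScan builds an exclusive prefix sum of the per-ray "alive" flags and kernScatter then packs the surviving rays to the front of the buffer. Since the file already pulls in <thrust/remove.h>, the same step can also be expressed with thrust::remove_if. The sketch below is only an illustration of that idea, not the project's actual code path; its Ray struct is a cut-down stand-in for the one declared in sceneStructs.h and keeps just the fields the compaction needs.

// A minimal, self-contained sketch of ray stream compaction with
// thrust::remove_if, shown as an alternative to the hand-rolled
// scan/scatter above. The Ray struct here is a simplified stand-in
// for the project's sceneStructs.h definition (assumption).
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/remove.h>
#include <thrust/execution_policy.h>

struct Ray {
    int  index;       // pixel this path writes to
    bool terminated;  // set when the path hits a light or leaves the scene
};

// Predicate: true for paths that should be dropped from the working set.
struct IsTerminated {
    __host__ __device__ bool operator()(const Ray &r) const {
        return r.terminated;
    }
};

int main() {
    // Build a small test batch: every third path is terminated.
    thrust::device_vector<Ray> rays(12);
    for (int i = 0; i < 12; ++i) {
        Ray r;
        r.index = i;
        r.terminated = (i % 3 == 0);
        rays[i] = r;
    }

    // remove_if compacts the surviving rays to the front and returns the new end;
    // the distance to it is the working-set size for the next bounce.
    auto new_end = thrust::remove_if(thrust::device, rays.begin(), rays.end(), IsTerminated());
    int newSize = static_cast<int>(new_end - rays.begin());

    printf("surviving paths: %d of %d\n", newSize, 12);
    return 0;
}
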
ec3d6da5da841df2cdc13ef0b848b1be1d9ee7d5.hip
// !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>

/*
 * An example of using CUDA's memory copy API to transfer data to and from the
 * device. In this case, hipMalloc is used to allocate memory on the GPU and
 * hipMemcpy is used to transfer the contents of host memory to an array
 * allocated using hipMalloc. Host memory is allocated using hipHostMalloc to
 * create a page-locked host array.
 */
int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    CHECK(hipSetDevice(dev));

    // memory size
    unsigned int isize = 1 << 22;
    unsigned int nbytes = isize * sizeof(float);

    // get device information
    hipDeviceProp_t deviceProp;
    CHECK(hipGetDeviceProperties(&deviceProp, dev));

    if (!deviceProp.canMapHostMemory)
    {
        printf("Device %d does not support mapping CPU host memory!\n", dev);
        CHECK(hipDeviceReset());
        exit(EXIT_SUCCESS);
    }

    printf("%s starting at ", argv[0]);
    printf("device %d: %s memory size %d nbyte %5.2fMB canMap %d\n", dev,
           deviceProp.name, isize, nbytes / (1024.0f * 1024.0f),
           deviceProp.canMapHostMemory);

    // allocate pinned host memory
    float *h_a;
    CHECK(hipHostMalloc ((float **)&h_a, nbytes));

    // allocate device memory
    float *d_a;
    CHECK(hipMalloc((float **)&d_a, nbytes));

    // initialize host memory
    memset(h_a, 0, nbytes);

    for (int i = 0; i < isize; i++) h_a[i] = 100.10f;

    // transfer data from the host to the device
    CHECK(hipMemcpy(d_a, h_a, nbytes, hipMemcpyHostToDevice));

    // transfer data from the device to the host
    CHECK(hipMemcpy(h_a, d_a, nbytes, hipMemcpyDeviceToHost));

    // free memory
    CHECK(hipFree(d_a));
    CHECK(hipHostFree(h_a));

    // reset device
    CHECK(hipDeviceReset());

    return EXIT_SUCCESS;
}
ec3d6da5da841df2cdc13ef0b848b1be1d9ee7d5.cu
#include "../common/common.h" #include <cuda_runtime.h> #include <stdio.h> /* * An example of using CUDA's memory copy API to transfer data to and from the * device. In this case, cudaMalloc is used to allocate memory on the GPU and * cudaMemcpy is used to transfer the contents of host memory to an array * allocated using cudaMalloc. Host memory is allocated using cudaMallocHost to * create a page-locked host array. */ int main(int argc, char **argv) { // set up device int dev = 0; CHECK(cudaSetDevice(dev)); // memory size unsigned int isize = 1 << 22; unsigned int nbytes = isize * sizeof(float); // get device information cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); if (!deviceProp.canMapHostMemory) { printf("Device %d does not support mapping CPU host memory!\n", dev); CHECK(cudaDeviceReset()); exit(EXIT_SUCCESS); } printf("%s starting at ", argv[0]); printf("device %d: %s memory size %d nbyte %5.2fMB canMap %d\n", dev, deviceProp.name, isize, nbytes / (1024.0f * 1024.0f), deviceProp.canMapHostMemory); // allocate pinned host memory float *h_a; CHECK(cudaMallocHost ((float **)&h_a, nbytes)); // allocate device memory float *d_a; CHECK(cudaMalloc((float **)&d_a, nbytes)); // initialize host memory memset(h_a, 0, nbytes); for (int i = 0; i < isize; i++) h_a[i] = 100.10f; // transfer data from the host to the device CHECK(cudaMemcpy(d_a, h_a, nbytes, cudaMemcpyHostToDevice)); // transfer data from the device to the host CHECK(cudaMemcpy(h_a, d_a, nbytes, cudaMemcpyDeviceToHost)); // free memory CHECK(cudaFree(d_a)); CHECK(cudaFreeHost(h_a)); // reset device CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
34183384b34a1b1ef7609a56e3866b782e48c0d3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; 
} __device__ hipComplex tans(hipComplex m) { return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { 
qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * 
cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex hoova(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hoova(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale = 20.0; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); hipComplex aon = expc(ai*moux); hipComplex uon= expc(mouy); hipComplex flurn(0.0,0.0); hipComplex accume(0.0,0.0); hipComplex eccume(0.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(1.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex 
sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = cue; hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ for(v=0;v<10;v++) { cue = cue - hoova(aon+cue*hoova(cue*uon)); } hipComplex kei=unity; double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
34183384b34a1b1ef7609a56e3866b782e48c0d3.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); 
cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the1(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ cuComplex the2(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ cuComplex the3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ cuComplex qin(cuComplex a, cuComplex q) { cuComplex unity(1.0,0.0); cuComplex 
out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ cuComplex geffa(cuComplex z, cuComplex q) { cuComplex out(0.0,0.0); cuComplex unity(1.0,0.0); cuComplex wu(0.0,0.0); cuComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ cuComplex thratd(cuComplex z, cuComplex q) { int n; cuComplex fau(4.0,0.0); cuComplex too(2.0,0.0); cuComplex unity(1.0,0.0); cuComplex ennn(1.0,0.0); cuComplex ni(-1.0,0.0); cuComplex noo(-1.0,0.0); cuComplex out(0.0,0.0); cuComplex loo = q; cuComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about 
whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex mitlef(cuComplex z,cuComplex c) { cuComplex out(0.0,0.0); cuComplex Z(1.0,0.0); cuComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ cuComplex hoova(cuComplex z) { cuComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ cuComplex arago(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hoova(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex ip(pi,0.0); const float scale = 20.0; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); cuComplex effx(fx,0.0); cuComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); cuComplex aon = expc(ai*moux); cuComplex uon= expc(mouy); cuComplex flurn(0.0,0.0); cuComplex accume(0.0,0.0); cuComplex eccume(0.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex vro(-1.0,0.0); cuComplex tle(1.0,0.0); cuComplex sle(4.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); cuComplex mrun(1/2.0,0.0); cuComplex gloon (4.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = cue; cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex 
rhus(1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ for(v=0;v<10;v++) { cue = cue - hoova(aon+cue*hoova(cue*uon)); } cuComplex kei=unity; double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
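
Both versions of the kernel above route complex exponentiation through powc, which converts the base to polar form and multiplies r^c e^{i d ln r} (mesp) by e^{i theta (c + di)} (frim). Written out, the identity the code computes for a base z = r e^{i\theta} and exponent c + di is

z^{c+di} = r^{c}\, e^{-d\theta}\,\bigl[\cos(c\theta + d\ln r) + i\,\sin(c\theta + d\ln r)\bigr],

so the real part of the exponent scales the magnitude through r^c, while the imaginary part both damps it by e^{-d\theta} and adds a rotation of d ln r.
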
bf4af7218fbbda920790190c2c81dc976caee0b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <utility> #include "dali/core/convert.h" #include "dali/core/cuda_utils.h" #include "dali/core/error_handling.h" #include "dali/core/static_switch.h" #include "dali/kernels/common/block_setup.h" #include "dali/operators/generic/cast.h" namespace dali { template <typename OType, typename IType> __global__ void BatchedCastKernel(const CastSampleDesc *samples, const kernels::BlockDesc<1> *blocks) { const auto &block = blocks[blockIdx.x]; const auto &sample = samples[block.sample_idx]; auto *out = reinterpret_cast<OType *>(sample.output); const auto *in = reinterpret_cast<const IType *>(sample.input); for (int x = threadIdx.x + block.start.x; x < block.end.x; x += blockDim.x) { out[x] = ConvertSat<OType>(in[x]); } } template <> void Cast<GPUBackend>::PrepareBlocks(const DeviceWorkspace &ws) { const auto &input = ws.Input<GPUBackend>(0); const auto &input_shape = input.shape(); std::array<std::pair<int, int>, 1> collapse_groups = {{{0, input_shape.sample_dim()}}}; auto collapsed_shape = collapse_dims<1>(input.shape(), collapse_groups); block_setup_.SetupBlocks(collapsed_shape, true); blocks_dev_.from_host(block_setup_.Blocks(), ws.stream()); } template <> void Cast<GPUBackend>::RunImpl(DeviceWorkspace &ws) { const auto &input = ws.Input<GPUBackend>(0); const auto &input_shape = input.shape(); auto &output = ws.Output<GPUBackend>(0); output.SetLayout(input.GetLayout()); auto num_samples = input_shape.num_samples(); samples_.resize(num_samples); for (int sample_id = 0; sample_id < num_samples; sample_id++) { samples_[sample_id].output = output.raw_mutable_tensor(sample_id); samples_[sample_id].input = input.raw_tensor(sample_id); } samples_dev_.from_host(samples_, ws.stream()); DALIDataType itype = input.type(); dim3 grid_dim = block_setup_.GridDim(); dim3 block_dim = block_setup_.BlockDim(); TYPE_SWITCH(output_type_, type2id, OType, CAST_ALLOWED_TYPES, ( TYPE_SWITCH(itype, type2id, IType, CAST_ALLOWED_TYPES, ( hipLaunchKernelGGL(( BatchedCastKernel<OType, IType>) , dim3(grid_dim), dim3(block_dim), 0, ws.stream(), samples_dev_.data(), blocks_dev_.data()); ), DALI_FAIL(make_string("Invalid input type: ", itype));); // NOLINT(whitespace/parens) ), DALI_FAIL(make_string("Invalid output type: ", output_type_));); // NOLINT(whitespace/parens) } DALI_REGISTER_OPERATOR(Cast, Cast<GPUBackend>, GPU); } // namespace dali
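The .hip listing above launches its kernels through hipLaunchKernelGGL, while the original .cu listing that follows uses the triple-chevron syntax; the grid, block, dynamic shared-memory size and stream become explicit macro arguments. A minimal sketch of the correspondence with a hypothetical kernel named scale, assuming only the standard CUDA runtime:

#include <cuda_runtime.h>

__global__ void scale(float* data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

int main() {
    const int n = 1024;
    float* d_data = nullptr;
    cudaMalloc(&d_data, n * sizeof(float));
    dim3 block(256), grid((n + 255) / 256);
    // CUDA form, as in the .cu listing below:
    scale<<<grid, block, 0, 0>>>(d_data, 2.0f, n);
    // Equivalent HIP form produced by hipify, as in the .hip listing above:
    //   hipLaunchKernelGGL(scale, grid, block, 0, 0, d_data, 2.0f, n);
    cudaDeviceSynchronize();
    cudaFree(d_data);
    return 0;
}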
bf4af7218fbbda920790190c2c81dc976caee0b0.cu
// Copyright (c) 2017-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <utility> #include "dali/core/convert.h" #include "dali/core/cuda_utils.h" #include "dali/core/error_handling.h" #include "dali/core/static_switch.h" #include "dali/kernels/common/block_setup.h" #include "dali/operators/generic/cast.h" namespace dali { template <typename OType, typename IType> __global__ void BatchedCastKernel(const CastSampleDesc *samples, const kernels::BlockDesc<1> *blocks) { const auto &block = blocks[blockIdx.x]; const auto &sample = samples[block.sample_idx]; auto *out = reinterpret_cast<OType *>(sample.output); const auto *in = reinterpret_cast<const IType *>(sample.input); for (int x = threadIdx.x + block.start.x; x < block.end.x; x += blockDim.x) { out[x] = ConvertSat<OType>(in[x]); } } template <> void Cast<GPUBackend>::PrepareBlocks(const DeviceWorkspace &ws) { const auto &input = ws.Input<GPUBackend>(0); const auto &input_shape = input.shape(); std::array<std::pair<int, int>, 1> collapse_groups = {{{0, input_shape.sample_dim()}}}; auto collapsed_shape = collapse_dims<1>(input.shape(), collapse_groups); block_setup_.SetupBlocks(collapsed_shape, true); blocks_dev_.from_host(block_setup_.Blocks(), ws.stream()); } template <> void Cast<GPUBackend>::RunImpl(DeviceWorkspace &ws) { const auto &input = ws.Input<GPUBackend>(0); const auto &input_shape = input.shape(); auto &output = ws.Output<GPUBackend>(0); output.SetLayout(input.GetLayout()); auto num_samples = input_shape.num_samples(); samples_.resize(num_samples); for (int sample_id = 0; sample_id < num_samples; sample_id++) { samples_[sample_id].output = output.raw_mutable_tensor(sample_id); samples_[sample_id].input = input.raw_tensor(sample_id); } samples_dev_.from_host(samples_, ws.stream()); DALIDataType itype = input.type(); dim3 grid_dim = block_setup_.GridDim(); dim3 block_dim = block_setup_.BlockDim(); TYPE_SWITCH(output_type_, type2id, OType, CAST_ALLOWED_TYPES, ( TYPE_SWITCH(itype, type2id, IType, CAST_ALLOWED_TYPES, ( BatchedCastKernel<OType, IType> <<<grid_dim, block_dim, 0, ws.stream()>>>(samples_dev_.data(), blocks_dev_.data()); ), DALI_FAIL(make_string("Invalid input type: ", itype));); // NOLINT(whitespace/parens) ), DALI_FAIL(make_string("Invalid output type: ", output_type_));); // NOLINT(whitespace/parens) } DALI_REGISTER_OPERATOR(Cast, Cast<GPUBackend>, GPU); } // namespace dali
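The Cast operator above flattens each sample to one dimension and runs an element-wise saturating conversion, out[x] = ConvertSat<OType>(in[x]), over per-sample blocks. The sketch below reduces that idea to a standalone int32-to-uint8 kernel with a grid-stride loop instead of DALI's block descriptors; the kernel and helper names are illustrative, not DALI APIs.

// Standalone sketch of a saturating cast: clamp each int to the uint8 range,
// the same idea as ConvertSat<OType>(in[x]) in the operator above.
#include <cstdio>
#include <cuda_runtime.h>

__device__ __forceinline__ unsigned char convert_sat_u8(int v) {
    return v < 0 ? 0 : (v > 255 ? 255 : static_cast<unsigned char>(v));
}

__global__ void cast_sat_kernel(const int* in, unsigned char* out, int n) {
    for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < n;
         x += gridDim.x * blockDim.x)
        out[x] = convert_sat_u8(in[x]);
}

int main() {
    const int n = 1 << 20;
    int* d_in = nullptr;
    unsigned char* d_out = nullptr;
    cudaMalloc(&d_in, n * sizeof(int));
    cudaMalloc(&d_out, n);
    cudaMemset(d_in, 0xFF, n * sizeof(int));          // every element becomes -1
    cast_sat_kernel<<<256, 256>>>(d_in, d_out, n);
    unsigned char h0 = 1;
    cudaMemcpy(&h0, d_out, 1, cudaMemcpyDeviceToHost);
    printf("first element after saturating cast: %d\n", h0);  // prints 0
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}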
89899e076e0b051faca5549083ddaa64c96d127e.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/mman.h> #include <unistd.h> #include "srad.h" // includes, project #include <hip/hip_runtime.h> // includes, kernels #include "srad_kernel.hip" #define CUDA_CALL_SAFE(f) \ do \ { \ hipError_t _cuda_error = f; \ if (_cuda_error != hipSuccess) \ { \ fprintf(stderr, \ "%s, %d, CUDA ERROR: %s %s\n", \ __FILE__, \ __LINE__, \ hipGetErrorName(_cuda_error), \ hipGetErrorString(_cuda_error) \ ); \ abort(); \ exit(EXIT_FAILURE); \ } \ } while (0) static inline double time_diff(struct timeval tv_start, struct timeval tv_end) { return (double)(tv_end.tv_sec - tv_start.tv_sec) * 1000.0 + (double)(tv_end.tv_usec - tv_start.tv_usec) / 1000.0; } void runTest( int argc, char** argv); void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <rows/cols> <folder>\n", argv[0]); exit(1); } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE); runTest( argc, argv); return EXIT_SUCCESS; } void runTest( int argc, char** argv) { long rows, cols, size_I, size_R, niter = 10, iter; float *J, lambda, q0sqr, sum, sum2, tmp, meanROI,varROI ; float *J_cuda; float *C_cuda; float *E_C, *W_C, *N_C, *S_C; unsigned long r1, r2, c1, c2; char *folder; char *filepath; int fd_J, fd_C, fd_E, fd_W, fd_S, fd_N; struct timeval tv_start, tv_end; double kernel_time = 0; // in ms double map_time = 0; // in ms double free_time = 0; // in ms if (argc == 3) { rows = atol(argv[1]); //number of rows in the domain cols = rows; //number of cols in the domain if ((rows%16!=0) || (cols%16!=0)) { fprintf(stderr, "rows and cols must be multiples of 16\n"); exit(1); } folder = argv[2]; r1 = 0; //y1 position of the speckle r2 = 127; //y2 position of the speckle c1 = 0; //x1 position of the speckle c2 = 127; //x2 position of the speckle lambda = 0.5; //Lambda value niter = 2; //number of iterations } else { usage(argc, argv); } size_I = cols * rows; size_R = (r2-r1+1)*(c2-c1+1); filepath = (char *)malloc(sizeof(char) * (strlen(folder) + 128)); //Allocate device memory gettimeofday(&tv_start, NULL); sprintf(filepath, "%s/J.hostreg.mem", folder); if ((fd_J = open(filepath, O_LARGEFILE | O_RDWR)) < 0) { fprintf(stderr, "%s was not opened\n", filepath); exit(EXIT_FAILURE); } if ((J = (float *)mmap(NULL, sizeof(float) * size_I, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, fd_J, 0)) == MAP_FAILED) { fprintf(stderr, "Cannot mmap %s\n", filepath); exit(EXIT_FAILURE); } CUDA_CALL_SAFE(hipHostRegister(J, sizeof(float) * size_I, hipHostRegisterDefault)); sprintf(filepath, "%s/C_cuda.hostreg.mem", folder); if ((fd_C = open(filepath, O_LARGEFILE | O_RDWR | O_CREAT)) < 0) { fprintf(stderr, "%s was not opened\n", filepath); exit(EXIT_FAILURE); } if (ftruncate(fd_C, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot truncate file %s\n", filepath); exit(EXIT_FAILURE); } if ((C_cuda = (float *)mmap(NULL, sizeof(float) * size_I, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, fd_C, 0)) == MAP_FAILED) { fprintf(stderr, "Cannot mmap %s\n", filepath); exit(EXIT_FAILURE); } CUDA_CALL_SAFE(hipHostRegister(C_cuda, sizeof(float) * size_I, hipHostRegisterDefault)); sprintf(filepath, 
"%s/E_C.hostreg.mem", folder); if ((fd_E = open(filepath, O_LARGEFILE | O_RDWR | O_CREAT)) < 0) { fprintf(stderr, "%s was not opened\n", filepath); exit(EXIT_FAILURE); } if (ftruncate(fd_E, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot truncate file %s\n", filepath); exit(EXIT_FAILURE); } if ((E_C = (float *)mmap(NULL, sizeof(float) * size_I, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, fd_E, 0)) == MAP_FAILED) { fprintf(stderr, "Cannot mmap %s\n", filepath); exit(EXIT_FAILURE); } CUDA_CALL_SAFE(hipHostRegister(E_C, sizeof(float) * size_I, hipHostRegisterDefault)); sprintf(filepath, "%s/W_C.hostreg.mem", folder); if ((fd_W = open(filepath, O_LARGEFILE | O_RDWR | O_CREAT)) < 0) { fprintf(stderr, "%s was not opened\n", filepath); exit(EXIT_FAILURE); } if (ftruncate(fd_W, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot truncate file %s\n", filepath); exit(EXIT_FAILURE); } if ((W_C = (float *)mmap(NULL, sizeof(float) * size_I, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, fd_W, 0)) == MAP_FAILED) { fprintf(stderr, "Cannot mmap %s\n", filepath); exit(EXIT_FAILURE); } CUDA_CALL_SAFE(hipHostRegister(W_C, sizeof(float) * size_I, hipHostRegisterDefault)); sprintf(filepath, "%s/S_C.hostreg.mem", folder); if ((fd_S = open(filepath, O_LARGEFILE | O_RDWR | O_CREAT)) < 0) { fprintf(stderr, "%s was not opened\n", filepath); exit(EXIT_FAILURE); } if (ftruncate(fd_S, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot truncate file %s\n", filepath); exit(EXIT_FAILURE); } if ((S_C = (float *)mmap(NULL, sizeof(float) * size_I, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, fd_S, 0)) == MAP_FAILED) { fprintf(stderr, "Cannot mmap %s\n", filepath); exit(EXIT_FAILURE); } CUDA_CALL_SAFE(hipHostRegister(S_C, sizeof(float) * size_I, hipHostRegisterDefault)); sprintf(filepath, "%s/N_C.hostreg.mem", folder); if ((fd_N = open(filepath, O_LARGEFILE | O_RDWR | O_CREAT)) < 0) { fprintf(stderr, "%s was not opened\n", filepath); exit(EXIT_FAILURE); } if (ftruncate(fd_N, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot truncate file %s\n", filepath); exit(EXIT_FAILURE); } if ((N_C = (float *)mmap(NULL, sizeof(float) * size_I, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, fd_N, 0)) == MAP_FAILED) { fprintf(stderr, "Cannot mmap %s\n", filepath); exit(EXIT_FAILURE); } CUDA_CALL_SAFE(hipHostRegister(N_C, sizeof(float) * size_I, hipHostRegisterDefault)); gettimeofday(&tv_end, NULL); map_time += time_diff(tv_start, tv_end); J_cuda = J; printf("Start the SRAD main loop\n"); gettimeofday(&tv_start, NULL); for (iter=0; iter< niter; iter++){ sum=0; sum2=0; for (long i=r1; i<=r2; i++) { for (long j=c1; j<=c2; j++) { tmp = J[i * cols + j]; sum += tmp ; sum2 += tmp*tmp; } } meanROI = sum / size_R; varROI = (sum2 / size_R) - meanROI*meanROI; q0sqr = varROI / (meanROI*meanROI); //Currently the input size must be divided by 16 - the block size long block_x = cols/(long)BLOCK_SIZE ; long block_y = rows/(long)BLOCK_SIZE ; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(block_x , block_y); //Run kernels hipLaunchKernelGGL(( srad_cuda_1), dim3(dimGrid), dim3(dimBlock), 0, 0, E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, q0sqr); hipLaunchKernelGGL(( srad_cuda_2), dim3(dimGrid), dim3(dimBlock), 0, 0, E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, lambda, q0sqr); CUDA_CALL_SAFE(hipDeviceSynchronize()); } gettimeofday(&tv_end, NULL); kernel_time += time_diff(tv_start, tv_end); gettimeofday(&tv_start, NULL); CUDA_CALL_SAFE(hipHostUnregister(J)); if (msync(J, sizeof(float) * size_I, 
MS_SYNC) != 0) { fprintf(stderr, "Cannot msync J\n"); perror("msync"); exit(EXIT_FAILURE); } if (munmap(J, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot munmap J\n"); perror("munmap"); exit(EXIT_FAILURE); } fsync(fd_J); close(fd_J); CUDA_CALL_SAFE(hipHostUnregister(C_cuda)); if (msync(C_cuda, sizeof(float) * size_I, MS_SYNC) != 0) { fprintf(stderr, "Cannot msync C_cuda\n"); perror("msync"); exit(EXIT_FAILURE); } if (munmap(C_cuda, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot munmap C_cuda\n"); perror("munmap"); exit(EXIT_FAILURE); } fsync(fd_C); close(fd_C); CUDA_CALL_SAFE(hipHostUnregister(E_C)); if (msync(E_C, sizeof(float) * size_I, MS_SYNC) != 0) { fprintf(stderr, "Cannot msync E_C\n"); perror("msync"); exit(EXIT_FAILURE); } if (munmap(E_C, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot munmap E_C\n"); perror("munmap"); exit(EXIT_FAILURE); } fsync(fd_E); close(fd_E); CUDA_CALL_SAFE(hipHostUnregister(W_C)); if (msync(W_C, sizeof(float) * size_I, MS_SYNC) != 0) { fprintf(stderr, "Cannot msync W_C\n"); perror("msync"); exit(EXIT_FAILURE); } if (munmap(W_C, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot munmap W_C\n"); perror("munmap"); exit(EXIT_FAILURE); } fsync(fd_W); close(fd_W); CUDA_CALL_SAFE(hipHostUnregister(S_C)); if (msync(S_C, sizeof(float) * size_I, MS_SYNC) != 0) { fprintf(stderr, "Cannot msync S_C\n"); perror("msync"); exit(EXIT_FAILURE); } if (munmap(S_C, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot munmap S_C\n"); perror("munmap"); exit(EXIT_FAILURE); } fsync(fd_S); close(fd_S); CUDA_CALL_SAFE(hipHostUnregister(N_C)); if (msync(N_C, sizeof(float) * size_I, MS_SYNC) != 0) { fprintf(stderr, "Cannot msync N_C\n"); perror("msync"); exit(EXIT_FAILURE); } if (munmap(N_C, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot munmap N_C\n"); perror("munmap"); exit(EXIT_FAILURE); } fsync(fd_N); close(fd_N); gettimeofday(&tv_end, NULL); free_time += time_diff(tv_start, tv_end); printf("Computation Done\n"); free(filepath); printf("==> header: kernel_time (ms),map_time (ms),free_time (ms)\n"); printf("==> data: %f,%f,%f\n", kernel_time, map_time, free_time); }
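Each SRAD iteration above first derives q0sqr from the speckle region of interest: meanROI and varROI over the rectangle [r1..r2] x [c1..c2], then q0sqr = varROI / meanROI^2. A host-side sketch of that computation follows; the helper name roi_q0sqr is illustrative, and it accumulates in double where the file accumulates in float.

// ROI statistics as computed at the top of each SRAD iteration above.
#include <cstdio>

float roi_q0sqr(const float* J, long cols,
                long r1, long r2, long c1, long c2) {
    double sum = 0.0, sum2 = 0.0;
    long size_R = (r2 - r1 + 1) * (c2 - c1 + 1);
    for (long i = r1; i <= r2; ++i) {
        for (long j = c1; j <= c2; ++j) {
            double v = J[i * cols + j];
            sum  += v;
            sum2 += v * v;
        }
    }
    double mean = sum / size_R;
    double var  = sum2 / size_R - mean * mean;        // E[x^2] - E[x]^2
    return (float)(var / (mean * mean));
}

int main() {
    float J[4] = {1.f, 2.f, 3.f, 4.f};                // 2 x 2 image
    printf("q0sqr = %f\n", roi_q0sqr(J, 2, 0, 1, 0, 1));  // var 1.25, mean 2.5 -> 0.2
    return 0;
}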
89899e076e0b051faca5549083ddaa64c96d127e.cu
// includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/mman.h> #include <unistd.h> #include "srad.h" // includes, project #include <cuda.h> // includes, kernels #include "srad_kernel.cu" #define CUDA_CALL_SAFE(f) \ do \ { \ cudaError_t _cuda_error = f; \ if (_cuda_error != cudaSuccess) \ { \ fprintf(stderr, \ "%s, %d, CUDA ERROR: %s %s\n", \ __FILE__, \ __LINE__, \ cudaGetErrorName(_cuda_error), \ cudaGetErrorString(_cuda_error) \ ); \ abort(); \ exit(EXIT_FAILURE); \ } \ } while (0) static inline double time_diff(struct timeval tv_start, struct timeval tv_end) { return (double)(tv_end.tv_sec - tv_start.tv_sec) * 1000.0 + (double)(tv_end.tv_usec - tv_start.tv_usec) / 1000.0; } void runTest( int argc, char** argv); void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <rows/cols> <folder>\n", argv[0]); exit(1); } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE); runTest( argc, argv); return EXIT_SUCCESS; } void runTest( int argc, char** argv) { long rows, cols, size_I, size_R, niter = 10, iter; float *J, lambda, q0sqr, sum, sum2, tmp, meanROI,varROI ; float *J_cuda; float *C_cuda; float *E_C, *W_C, *N_C, *S_C; unsigned long r1, r2, c1, c2; char *folder; char *filepath; int fd_J, fd_C, fd_E, fd_W, fd_S, fd_N; struct timeval tv_start, tv_end; double kernel_time = 0; // in ms double map_time = 0; // in ms double free_time = 0; // in ms if (argc == 3) { rows = atol(argv[1]); //number of rows in the domain cols = rows; //number of cols in the domain if ((rows%16!=0) || (cols%16!=0)) { fprintf(stderr, "rows and cols must be multiples of 16\n"); exit(1); } folder = argv[2]; r1 = 0; //y1 position of the speckle r2 = 127; //y2 position of the speckle c1 = 0; //x1 position of the speckle c2 = 127; //x2 position of the speckle lambda = 0.5; //Lambda value niter = 2; //number of iterations } else { usage(argc, argv); } size_I = cols * rows; size_R = (r2-r1+1)*(c2-c1+1); filepath = (char *)malloc(sizeof(char) * (strlen(folder) + 128)); //Allocate device memory gettimeofday(&tv_start, NULL); sprintf(filepath, "%s/J.hostreg.mem", folder); if ((fd_J = open(filepath, O_LARGEFILE | O_RDWR)) < 0) { fprintf(stderr, "%s was not opened\n", filepath); exit(EXIT_FAILURE); } if ((J = (float *)mmap(NULL, sizeof(float) * size_I, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, fd_J, 0)) == MAP_FAILED) { fprintf(stderr, "Cannot mmap %s\n", filepath); exit(EXIT_FAILURE); } CUDA_CALL_SAFE(cudaHostRegister(J, sizeof(float) * size_I, cudaHostRegisterDefault)); sprintf(filepath, "%s/C_cuda.hostreg.mem", folder); if ((fd_C = open(filepath, O_LARGEFILE | O_RDWR | O_CREAT)) < 0) { fprintf(stderr, "%s was not opened\n", filepath); exit(EXIT_FAILURE); } if (ftruncate(fd_C, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot truncate file %s\n", filepath); exit(EXIT_FAILURE); } if ((C_cuda = (float *)mmap(NULL, sizeof(float) * size_I, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, fd_C, 0)) == MAP_FAILED) { fprintf(stderr, "Cannot mmap %s\n", filepath); exit(EXIT_FAILURE); } CUDA_CALL_SAFE(cudaHostRegister(C_cuda, sizeof(float) * size_I, cudaHostRegisterDefault)); sprintf(filepath, "%s/E_C.hostreg.mem", folder); if ((fd_E = open(filepath, 
O_LARGEFILE | O_RDWR | O_CREAT)) < 0) { fprintf(stderr, "%s was not opened\n", filepath); exit(EXIT_FAILURE); } if (ftruncate(fd_E, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot truncate file %s\n", filepath); exit(EXIT_FAILURE); } if ((E_C = (float *)mmap(NULL, sizeof(float) * size_I, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, fd_E, 0)) == MAP_FAILED) { fprintf(stderr, "Cannot mmap %s\n", filepath); exit(EXIT_FAILURE); } CUDA_CALL_SAFE(cudaHostRegister(E_C, sizeof(float) * size_I, cudaHostRegisterDefault)); sprintf(filepath, "%s/W_C.hostreg.mem", folder); if ((fd_W = open(filepath, O_LARGEFILE | O_RDWR | O_CREAT)) < 0) { fprintf(stderr, "%s was not opened\n", filepath); exit(EXIT_FAILURE); } if (ftruncate(fd_W, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot truncate file %s\n", filepath); exit(EXIT_FAILURE); } if ((W_C = (float *)mmap(NULL, sizeof(float) * size_I, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, fd_W, 0)) == MAP_FAILED) { fprintf(stderr, "Cannot mmap %s\n", filepath); exit(EXIT_FAILURE); } CUDA_CALL_SAFE(cudaHostRegister(W_C, sizeof(float) * size_I, cudaHostRegisterDefault)); sprintf(filepath, "%s/S_C.hostreg.mem", folder); if ((fd_S = open(filepath, O_LARGEFILE | O_RDWR | O_CREAT)) < 0) { fprintf(stderr, "%s was not opened\n", filepath); exit(EXIT_FAILURE); } if (ftruncate(fd_S, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot truncate file %s\n", filepath); exit(EXIT_FAILURE); } if ((S_C = (float *)mmap(NULL, sizeof(float) * size_I, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, fd_S, 0)) == MAP_FAILED) { fprintf(stderr, "Cannot mmap %s\n", filepath); exit(EXIT_FAILURE); } CUDA_CALL_SAFE(cudaHostRegister(S_C, sizeof(float) * size_I, cudaHostRegisterDefault)); sprintf(filepath, "%s/N_C.hostreg.mem", folder); if ((fd_N = open(filepath, O_LARGEFILE | O_RDWR | O_CREAT)) < 0) { fprintf(stderr, "%s was not opened\n", filepath); exit(EXIT_FAILURE); } if (ftruncate(fd_N, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot truncate file %s\n", filepath); exit(EXIT_FAILURE); } if ((N_C = (float *)mmap(NULL, sizeof(float) * size_I, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, fd_N, 0)) == MAP_FAILED) { fprintf(stderr, "Cannot mmap %s\n", filepath); exit(EXIT_FAILURE); } CUDA_CALL_SAFE(cudaHostRegister(N_C, sizeof(float) * size_I, cudaHostRegisterDefault)); gettimeofday(&tv_end, NULL); map_time += time_diff(tv_start, tv_end); J_cuda = J; printf("Start the SRAD main loop\n"); gettimeofday(&tv_start, NULL); for (iter=0; iter< niter; iter++){ sum=0; sum2=0; for (long i=r1; i<=r2; i++) { for (long j=c1; j<=c2; j++) { tmp = J[i * cols + j]; sum += tmp ; sum2 += tmp*tmp; } } meanROI = sum / size_R; varROI = (sum2 / size_R) - meanROI*meanROI; q0sqr = varROI / (meanROI*meanROI); //Currently the input size must be divided by 16 - the block size long block_x = cols/(long)BLOCK_SIZE ; long block_y = rows/(long)BLOCK_SIZE ; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(block_x , block_y); //Run kernels srad_cuda_1<<<dimGrid, dimBlock>>>(E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, q0sqr); srad_cuda_2<<<dimGrid, dimBlock>>>(E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, lambda, q0sqr); CUDA_CALL_SAFE(cudaThreadSynchronize()); } gettimeofday(&tv_end, NULL); kernel_time += time_diff(tv_start, tv_end); gettimeofday(&tv_start, NULL); CUDA_CALL_SAFE(cudaHostUnregister(J)); if (msync(J, sizeof(float) * size_I, MS_SYNC) != 0) { fprintf(stderr, "Cannot msync J\n"); perror("msync"); exit(EXIT_FAILURE); } if (munmap(J, sizeof(float) * 
size_I) != 0) { fprintf(stderr, "Cannot munmap J\n"); perror("munmap"); exit(EXIT_FAILURE); } fsync(fd_J); close(fd_J); CUDA_CALL_SAFE(cudaHostUnregister(C_cuda)); if (msync(C_cuda, sizeof(float) * size_I, MS_SYNC) != 0) { fprintf(stderr, "Cannot msync C_cuda\n"); perror("msync"); exit(EXIT_FAILURE); } if (munmap(C_cuda, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot munmap C_cuda\n"); perror("munmap"); exit(EXIT_FAILURE); } fsync(fd_C); close(fd_C); CUDA_CALL_SAFE(cudaHostUnregister(E_C)); if (msync(E_C, sizeof(float) * size_I, MS_SYNC) != 0) { fprintf(stderr, "Cannot msync E_C\n"); perror("msync"); exit(EXIT_FAILURE); } if (munmap(E_C, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot munmap E_C\n"); perror("munmap"); exit(EXIT_FAILURE); } fsync(fd_E); close(fd_E); CUDA_CALL_SAFE(cudaHostUnregister(W_C)); if (msync(W_C, sizeof(float) * size_I, MS_SYNC) != 0) { fprintf(stderr, "Cannot msync W_C\n"); perror("msync"); exit(EXIT_FAILURE); } if (munmap(W_C, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot munmap W_C\n"); perror("munmap"); exit(EXIT_FAILURE); } fsync(fd_W); close(fd_W); CUDA_CALL_SAFE(cudaHostUnregister(S_C)); if (msync(S_C, sizeof(float) * size_I, MS_SYNC) != 0) { fprintf(stderr, "Cannot msync S_C\n"); perror("msync"); exit(EXIT_FAILURE); } if (munmap(S_C, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot munmap S_C\n"); perror("munmap"); exit(EXIT_FAILURE); } fsync(fd_S); close(fd_S); CUDA_CALL_SAFE(cudaHostUnregister(N_C)); if (msync(N_C, sizeof(float) * size_I, MS_SYNC) != 0) { fprintf(stderr, "Cannot msync N_C\n"); perror("msync"); exit(EXIT_FAILURE); } if (munmap(N_C, sizeof(float) * size_I) != 0) { fprintf(stderr, "Cannot munmap N_C\n"); perror("munmap"); exit(EXIT_FAILURE); } fsync(fd_N); close(fd_N); gettimeofday(&tv_end, NULL); free_time += time_diff(tv_start, tv_end); printf("Computation Done\n"); free(filepath); printf("==> header: kernel_time (ms),map_time (ms),free_time (ms)\n"); printf("==> data: %f,%f,%f\n", kernel_time, map_time, free_time); }
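Instead of cudaMalloc, the SRAD variant above backs every array with a file: mmap the file, pin the mapping with cudaHostRegister so the kernels can dereference the host pointer directly (J is passed straight to the kernels as J_cuda), then on teardown unregister, msync, munmap and close. A minimal sketch of that pattern with a hypothetical file name:

// File-backed, pinned host buffer in the style of the SRAD listing above.
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cuda_runtime.h>

int main() {
    const size_t bytes = 1 << 20;
    int fd = open("data.mem", O_RDWR | O_CREAT, 0644);        // hypothetical file
    if (fd < 0 || ftruncate(fd, bytes) != 0) return 1;
    float* buf = (float*)mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                              MAP_SHARED, fd, 0);
    if (buf == MAP_FAILED) return 1;
    // Pin the mapped range so device code can access it, as the SRAD code
    // does by handing J directly to its kernels.
    cudaHostRegister(buf, bytes, cudaHostRegisterDefault);
    // ... launch kernels that read/write buf ...
    cudaHostUnregister(buf);
    msync(buf, bytes, MS_SYNC);        // flush dirty pages back to the file
    munmap(buf, bytes);
    fsync(fd);
    close(fd);
    return 0;
}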
2e1542516a20dc4797d7b59744a75d28e793e71c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/mean_iou_op.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace paddle { namespace operators { using phi::PADDLE_CUDA_NUM_THREADS; template <typename T> __global__ void CountCUDAKernel(const int num_classes, const int count, const T* predictions, const T* labels, int* wrong, int* correct) { extern __shared__ int blcok_cache[]; int* wrong_c = blcok_cache; int* correct_c = blcok_cache + num_classes; // init cache for (int i = threadIdx.x; i < num_classes * 2; i += blockDim.x) { blcok_cache[i] = 0; } __syncthreads(); T pred; T label; CUDA_KERNEL_LOOP(i, count) { pred = predictions[i]; label = labels[i]; if (pred == label) { atomicAdd(correct_c + pred, 1); } else { atomicAdd(wrong_c + pred, 1); atomicAdd(wrong_c + label, 1); } } __syncthreads(); for (int i = threadIdx.x; i < num_classes; i += blockDim.x) { atomicAdd(wrong + i, wrong_c[i]); atomicAdd(correct + i, correct_c[i]); } } __global__ void ComputeIoUCUDAKernel( const int num_classes, int* wrong, int* correct, float* ious, float* iou) { __shared__ int valid_count_c; if (threadIdx.x == 0) { valid_count_c = 0; } __syncthreads(); CUDA_KERNEL_LOOP(i, num_classes) { int wrong_n = wrong[i]; int correct_n = correct[i]; int denominator = wrong_n + correct_n; if (denominator > 0) { atomicAdd(&valid_count_c, 1); ious[i] = static_cast<float>(correct_n) / denominator; } else { ious[i] = 0; } } __syncthreads(); if (threadIdx.x == 0) { float iou_sum = 0; for (int i = 0; i < num_classes; ++i) { iou_sum += ious[i]; } iou[0] += iou_sum / valid_count_c; } } template <typename T, typename DeviceContext> class MeanIoUCUDAOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.template device_context<phi::GPUContext>(); auto& place = *dev_ctx.eigen_device(); // get input and output tensor auto* predictions = ctx.Input<phi::DenseTensor>("Predictions"); auto* labels = ctx.Input<phi::DenseTensor>("Labels"); auto* out_mean_iou = ctx.Output<phi::DenseTensor>("OutMeanIou"); auto* out_wrong = ctx.Output<phi::DenseTensor>("OutWrong"); auto* out_correct = ctx.Output<phi::DenseTensor>("OutCorrect"); int num_classes = static_cast<int>(ctx.Attr<int>("num_classes")); // Get data ptr const T* predictions_data = predictions->data<T>(); const T* labels_data = labels->data<T>(); int* out_wrong_data = out_wrong->mutable_data<int>(ctx.GetPlace()); int* out_correct_data = out_correct->mutable_data<int>(ctx.GetPlace()); float* out_mean_iou_data = out_mean_iou->mutable_data<float>(ctx.GetPlace()); // Get Eigen tensor auto out_mean_iou_t = EigenTensor<float, 1>::From(*out_mean_iou); auto out_wrong_t = EigenTensor<int, 1>::From(*out_wrong); auto 
out_correct_t = EigenTensor<int, 1>::From(*out_correct); // Temporary memory auto tmp_ious_data = memory::Alloc( dev_ctx.GetPlace(), num_classes * sizeof(float), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); float* ious_data = static_cast<float*>(tmp_ious_data->ptr()); // Init out_wrong, out_correct and out_mean_iou out_wrong_t.device(place) = out_wrong_t.constant(0); out_correct_t.device(place) = out_correct_t.constant(0); out_mean_iou_t.device(place) = out_mean_iou_t.constant(0.0f); // collect pre wrong, correct and mean_iou auto in_mean_ious = ctx.MultiInput<phi::DenseTensor>("InMeanIou"); for (int i = 0; i < in_mean_ious.size(); ++i) { out_mean_iou_t.device(place) += EigenTensor<float, 1>::From(*in_mean_ious[i]); } auto in_wrongs = ctx.MultiInput<phi::DenseTensor>("InWrongs"); for (int i = 0; i < in_wrongs.size(); ++i) { out_wrong_t.device(place) += EigenTensor<int, 1>::From(*in_wrongs[i]); } auto in_corrects = ctx.MultiInput<phi::DenseTensor>("InCorrects"); for (int i = 0; i < in_corrects.size(); ++i) { out_correct_t.device(place) += EigenTensor<int, 1>::From(*in_corrects[i]); } // compute auto stream = ctx.cuda_device_context().stream(); int block = PADDLE_CUDA_NUM_THREADS; int grid = (predictions->numel() + block - 1) / block; int cache_size = (num_classes * 2 + 1) * sizeof(int); hipLaunchKernelGGL(( CountCUDAKernel<T>) , dim3(grid), dim3(block), cache_size, stream, num_classes, predictions->numel(), predictions_data, labels_data, out_wrong_data, out_correct_data); hipLaunchKernelGGL(( ComputeIoUCUDAKernel), dim3(1), dim3(block), 0, stream, num_classes, out_wrong_data, out_correct_data, ious_data, out_mean_iou_data); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; PD_REGISTER_STRUCT_KERNEL( mean_iou, GPU, ALL_LAYOUT, ops::MeanIoUCUDAOpKernel, int, int64_t) {}
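CountCUDAKernel above keeps per-class wrong/correct counters in dynamic shared memory, updates them with shared-memory atomics, and flushes each block's counters to the global arrays once at the end. The sketch below reduces that pattern to a plain per-class histogram; the kernel name and sizes are illustrative and it is not the Paddle operator itself.

// Per-block shared-memory counters flushed with one atomicAdd per class,
// the same structure as CountCUDAKernel above.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void class_histogram(const int* labels, int count,
                                int num_classes, int* global_hist) {
    extern __shared__ int block_hist[];               // num_classes ints
    for (int i = threadIdx.x; i < num_classes; i += blockDim.x)
        block_hist[i] = 0;
    __syncthreads();
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count;
         i += gridDim.x * blockDim.x)
        atomicAdd(&block_hist[labels[i]], 1);         // cheap shared atomics
    __syncthreads();
    for (int i = threadIdx.x; i < num_classes; i += blockDim.x)
        atomicAdd(&global_hist[i], block_hist[i]);    // one flush per block
}

int main() {
    const int count = 1 << 16, num_classes = 8;
    int *d_labels = nullptr, *d_hist = nullptr;
    cudaMalloc(&d_labels, count * sizeof(int));
    cudaMalloc(&d_hist, num_classes * sizeof(int));
    cudaMemset(d_labels, 0, count * sizeof(int));     // all samples in class 0
    cudaMemset(d_hist, 0, num_classes * sizeof(int));
    class_histogram<<<64, 256, num_classes * sizeof(int)>>>(
        d_labels, count, num_classes, d_hist);
    int h[8];
    cudaMemcpy(h, d_hist, sizeof(h), cudaMemcpyDeviceToHost);
    printf("class 0 count = %d\n", h[0]);             // 65536
    cudaFree(d_labels);
    cudaFree(d_hist);
    return 0;
}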
2e1542516a20dc4797d7b59744a75d28e793e71c.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/mean_iou_op.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace paddle { namespace operators { using phi::PADDLE_CUDA_NUM_THREADS; template <typename T> __global__ void CountCUDAKernel(const int num_classes, const int count, const T* predictions, const T* labels, int* wrong, int* correct) { extern __shared__ int blcok_cache[]; int* wrong_c = blcok_cache; int* correct_c = blcok_cache + num_classes; // init cache for (int i = threadIdx.x; i < num_classes * 2; i += blockDim.x) { blcok_cache[i] = 0; } __syncthreads(); T pred; T label; CUDA_KERNEL_LOOP(i, count) { pred = predictions[i]; label = labels[i]; if (pred == label) { atomicAdd(correct_c + pred, 1); } else { atomicAdd(wrong_c + pred, 1); atomicAdd(wrong_c + label, 1); } } __syncthreads(); for (int i = threadIdx.x; i < num_classes; i += blockDim.x) { atomicAdd(wrong + i, wrong_c[i]); atomicAdd(correct + i, correct_c[i]); } } __global__ void ComputeIoUCUDAKernel( const int num_classes, int* wrong, int* correct, float* ious, float* iou) { __shared__ int valid_count_c; if (threadIdx.x == 0) { valid_count_c = 0; } __syncthreads(); CUDA_KERNEL_LOOP(i, num_classes) { int wrong_n = wrong[i]; int correct_n = correct[i]; int denominator = wrong_n + correct_n; if (denominator > 0) { atomicAdd(&valid_count_c, 1); ious[i] = static_cast<float>(correct_n) / denominator; } else { ious[i] = 0; } } __syncthreads(); if (threadIdx.x == 0) { float iou_sum = 0; for (int i = 0; i < num_classes; ++i) { iou_sum += ious[i]; } iou[0] += iou_sum / valid_count_c; } } template <typename T, typename DeviceContext> class MeanIoUCUDAOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.template device_context<phi::GPUContext>(); auto& place = *dev_ctx.eigen_device(); // get input and output tensor auto* predictions = ctx.Input<phi::DenseTensor>("Predictions"); auto* labels = ctx.Input<phi::DenseTensor>("Labels"); auto* out_mean_iou = ctx.Output<phi::DenseTensor>("OutMeanIou"); auto* out_wrong = ctx.Output<phi::DenseTensor>("OutWrong"); auto* out_correct = ctx.Output<phi::DenseTensor>("OutCorrect"); int num_classes = static_cast<int>(ctx.Attr<int>("num_classes")); // Get data ptr const T* predictions_data = predictions->data<T>(); const T* labels_data = labels->data<T>(); int* out_wrong_data = out_wrong->mutable_data<int>(ctx.GetPlace()); int* out_correct_data = out_correct->mutable_data<int>(ctx.GetPlace()); float* out_mean_iou_data = out_mean_iou->mutable_data<float>(ctx.GetPlace()); // Get Eigen tensor auto out_mean_iou_t = EigenTensor<float, 1>::From(*out_mean_iou); auto out_wrong_t = EigenTensor<int, 1>::From(*out_wrong); auto out_correct_t = EigenTensor<int, 1>::From(*out_correct); // Temporary memory auto 
tmp_ious_data = memory::Alloc( dev_ctx.GetPlace(), num_classes * sizeof(float), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); float* ious_data = static_cast<float*>(tmp_ious_data->ptr()); // Init out_wrong, out_correct and out_mean_iou out_wrong_t.device(place) = out_wrong_t.constant(0); out_correct_t.device(place) = out_correct_t.constant(0); out_mean_iou_t.device(place) = out_mean_iou_t.constant(0.0f); // collect pre wrong, correct and mean_iou auto in_mean_ious = ctx.MultiInput<phi::DenseTensor>("InMeanIou"); for (int i = 0; i < in_mean_ious.size(); ++i) { out_mean_iou_t.device(place) += EigenTensor<float, 1>::From(*in_mean_ious[i]); } auto in_wrongs = ctx.MultiInput<phi::DenseTensor>("InWrongs"); for (int i = 0; i < in_wrongs.size(); ++i) { out_wrong_t.device(place) += EigenTensor<int, 1>::From(*in_wrongs[i]); } auto in_corrects = ctx.MultiInput<phi::DenseTensor>("InCorrects"); for (int i = 0; i < in_corrects.size(); ++i) { out_correct_t.device(place) += EigenTensor<int, 1>::From(*in_corrects[i]); } // compute auto stream = ctx.cuda_device_context().stream(); int block = PADDLE_CUDA_NUM_THREADS; int grid = (predictions->numel() + block - 1) / block; int cache_size = (num_classes * 2 + 1) * sizeof(int); CountCUDAKernel<T> <<<grid, block, cache_size, stream>>>(num_classes, predictions->numel(), predictions_data, labels_data, out_wrong_data, out_correct_data); ComputeIoUCUDAKernel<<<1, block, 0, stream>>>(num_classes, out_wrong_data, out_correct_data, ious_data, out_mean_iou_data); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; PD_REGISTER_STRUCT_KERNEL( mean_iou, GPU, ALL_LAYOUT, ops::MeanIoUCUDAOpKernel, int, int64_t) {}
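ComputeIoUCUDAKernel above forms iou_i = correct_i / (wrong_i + correct_i) for every class with a non-zero denominator and averages only over those valid classes. A host-side sketch of the same reduction, with a hypothetical helper name:

// Mean IoU over valid classes, mirroring the reduction in the kernel above.
#include <cstdio>

float mean_iou(const int* wrong, const int* correct, int num_classes) {
    float sum = 0.0f;
    int valid = 0;
    for (int i = 0; i < num_classes; ++i) {
        int denom = wrong[i] + correct[i];
        if (denom > 0) {                              // skip classes never seen
            sum += static_cast<float>(correct[i]) / denom;
            ++valid;
        }
    }
    return valid > 0 ? sum / valid : 0.0f;
}

int main() {
    int wrong[3]   = {2, 0, 0};                       // class 2 never appears
    int correct[3] = {8, 5, 0};
    printf("mean IoU = %f\n", mean_iou(wrong, correct, 3));  // (0.8 + 1.0) / 2
    return 0;
}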
821178cec2f8a9467d9202891513149d8fa2bbe7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @precisions normal z -> c d s @author Peng Du @author Tingxing Dong @author Mark Gates This file implements lower case, and is called by ztrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "common_magma.h" #include "ztrtri.h" /* This inverts the diagonal IB by IB inner blocks of A, and stores the results in d_dinvA. Each thread block with IB threads does one inner block. Each thread deals with one row of the inner block. */ __global__ void ztrtri_diag_kernel_lower( magma_diag_t diag, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *d_dinvA) { int tx = threadIdx.x; int bx = blockIdx.x; int blk_ind = bx*IB; int ind = blk_ind + tx; A += blk_ind + blk_ind*lda; // A(blk_ind, blk_ind) // TODO sB should be [IB][IB+1] to avoid bank conflicts, right? __shared__ magmaDoubleComplex sB[IB*IB]; magmaDoubleComplex y_tx; // load lower triangle of inner block of A; zero upper triangle & outside matrix #pragma unroll for( int j=0; j < IB; j++ ) { if (tx >= j && ind < n) { sB[tx + j*IB] = A[tx + j*lda]; } else { sB[tx + j*IB] = MAGMA_Z_ZERO; } } __syncthreads(); // invert the diagonal if (diag == MagmaUnit) { sB[tx + tx*IB] = MAGMA_Z_ONE; } else { if ( sB[tx + tx*IB] == MAGMA_Z_ZERO ) { // singular or outside matrix sB[tx + tx*IB] = MAGMA_Z_ONE; } else { sB[tx + tx*IB] = MAGMA_Z_ONE / sB[tx + tx*IB]; } } // compute elements j+1:IB-1 of j-th column. for( int j=IB-2; j >= 0; j-- ) { if ( tx > j ) { // trmv: y = sB(j+1:IB-1, j+1:IB-1) * sB(j+1:IB-1, j) // each thread sums one element, y[tx] y_tx = MAGMA_Z_ZERO; #pragma unroll for( int k=j+1; k < IB; k++ ) y_tx += sB[tx + k*IB] * sB[k + j*IB]; // scal: sB(j+1:IB-1, j) = -sB(j,j) * y sB[tx + j*IB] = -sB[j + j*IB] * y_tx; } __syncthreads(); } // go to the (bx / ib_per_NB) outer NB*NB block, // then the (bx % ib_per_NB) inner IB*IB block inside that. int ib_per_NB = NB/IB; d_dinvA += (bx / ib_per_NB)*NB*NB + (bx % ib_per_NB)*(NB*IB + IB); // write result #pragma unroll for( int j=0; j < IB; j++ ) { d_dinvA[tx + j*NB] = sB[tx + j*IB]; } } /* Let A be an NB*NB lower triangular matrix, and B its inverse. Then the block decomposition [ A11 0 ] * [ B11 0 ] = [ I 0 ] [ A21 A22 ] [ B21 B22 ] [ 0 I ] yields A11*B11 = I ==> B11 = A11^{-1}, A22*B22 = I ==> B22 = A22^{-1}, A21*B11 + A22*B21 = 0 ==> B21 = -A22^{-1}*A21*B11 = -B22*A21*B11. ztrtri_diag_kernel inverts A11 and A22. triple_zgemm16 routines multiply: part 1: B21 = A21 * B11, part 2: B21 = -B22 * B21. At this level, inner block is jb=16, with one 4x4 thread block per inner block. Each submatrix Aij and Bij is jb x jb. The submatrix dimension is multiplied by 2 at each level, so the next level is jb*2 = 32. A "page" is the next bigger block, here jb*2=32, [ B11 0 ] which contains [ B21 B22 ]. Outer blocks are NB x NB. A21 may have < jb rows, but is guaranteed to have jb cols since A22 is on the right. This makes a single check easy to do. B is stored in workspace that is a full multiple of NB x NB; no checks needed. We split this into part1 & part2 to synchronize all blocks and make sure that writes to B12 are observed by all blocks. 
*/ /* * B21 = A21 * B11 */ __global__ void triple_zgemm16_part1_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { // emulate 3D grid: NX * (NY*npages) const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by*16; const int id = tx + ty*blockDim.x; const int ind = page*jb*2 + jb + ibx + id; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B21 = A21 * B11 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb; // A21 B = d_dinvA; // B11 C = d_dinvA + jb; // B21 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // TODO this won't coalesce, will it? unless NX=32 (or maybe 16 with doubles, or 8 with double-complex) // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 4 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); if ( ind < n ) { // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); } // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // TODO instead of writing result, copy it to sB and do part 2. // Would only work for jb=16, because only then does rC fit into sB. // If sB were [NT][16+], then rC would fit into sB. 
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B21 = -B22 * B21 */ __global__ void triple_zgemm16_part2_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by*16; const int id = tx + ty*blockDim.x; const int ind = page*jb*2 + jb + ibx + id; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B21 = -B22 * B21 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; // shadows lda argument int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb; // B21 B = C; // B21, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; // TODO factor this out: // gemm16<NX, NY> computes NT x 16 block of C: // C(1:nt, 1:16) = A(1:nt, 1:jb) * B(1:jb, 1:16) // where NT = NX * NY. // part 1: gemm16<4,4>( /*NT, 16,*/ jb, 1, A21, lda, B11, NB, /*0*/, B21, NB, n, ind, tx, ty ); // part 2: gemm16<4,4>( /*NT, 16,*/ jb, -1, B22, NB, B21, NB, /*0*/, B21, NB, n, ind, tx, ty ); // okay for C to overwrite B const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 4 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); if ( ind < n ) { // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); } // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B21 = A21 * B11 */ __global__ void triple_zgemm32_part1_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by*16; const int id = tx + ty*blockDim.x; const int ind = page*jb*2 + jb + ibx + id; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B21 = A21 * B11 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb; // A21 B = d_dinvA; // B11 C = d_dinvA + jb; // B21 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 8 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); if ( ind < n ) { // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); } // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B21 = -B22 * B21 */ __global__ void triple_zgemm32_part2_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by*16; const int id = tx + ty*blockDim.x; const int ind = page*jb*2 + jb + ibx + id; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B21 = -B22 * B21 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb; // B21 B = C; // B21, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 8 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); if ( ind < n ) { // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); } // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B21 = A21 * B11 */ __global__ void triple_zgemm64_part1_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; const int ind = page*jb*2 + jb + ibx + id; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B21 = A21 * B11 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb; // A21 B = d_dinvA; // B11 C = d_dinvA + jb; // B21 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); if ( ind < n ) { // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); } // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B21 = -B22 * B21 */ __global__ void triple_zgemm64_part2_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; const int ind = page*jb*2 + jb + ibx + id; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B21 = -B22 * B21 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb; // B21 B = C; // B21, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); if ( ind < n ) { // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); } // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B21 = A21 * B11 */ __global__ void triple_zgemm_above64_part1_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; const int ind = page*jb*2 + jb + ibx + id; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B21 = A21 * B11 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // For jb > 64, we process B21 as gridDim.x sections of 64 rows each, with gridDim.x > 1. // Each section needs all of the B matrix, so C cannot overwrite B. // Therefore, store B21 temporarily in the previously unused B12 matrix // (i.e., above diagonal), then in part 3, zero out B12. // // Kernels with jb <= 64 don't have this problem, because only the // NT x 16 section of C that overwrites the same section of B depends // on that section of B. // // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb; // A21 B = d_dinvA; // B11 C = d_dinvA + jb*NB; // B21; write to B12 temp location A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); if ( ind < n ) { // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); } // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B21 = -B22 * B21 */ __global__ void triple_zgemm_above64_part2_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; const int ind = page*jb*2 + jb + ibx + id; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B21 = -B22 * B21 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA + jb*NB + jb; // B22 B = d_dinvA + jb*NB; // B21, read from B12 temp location C = d_dinvA + jb; // B21 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); if ( ind < n ) { // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); } // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * zero out B12 temp location */ __global__ void triple_zgemm_above64_part3_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part three---------------------------// { // zero out B12 temp location magmaDoubleComplex *B12; int ldb = NB; B12 = d_dinvA + jb*NB; B12 += ibx + id + iby*ldb; #pragma unroll for( int i = 0; i < 16; i++ ) { B12[i*ldb] = MAGMA_Z_ZERO; } } }
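The kernels in this .hip file (and the matching .cu file below) funnel their inner products through a zaxpy16 helper defined in ztrtri.h, which is not reproduced in this dump. Purely as a reading aid, here is a minimal sketch of the semantics the inline comments imply -- an axpy of one 16-element shared-memory row of B into the per-thread accumulator rC. The real MAGMA helper may be written differently; the name below is made up.

// Sketch only -- assumed semantics of zaxpy16 from ztrtri.h, not MAGMA's code.
// Implements rC[0:15] += a * b[0:15] for magmaDoubleComplex values.
static __device__ void zaxpy16_sketch(
    magmaDoubleComplex a,
    const magmaDoubleComplex* __restrict__ b,   // one 16-element row of sB
    magmaDoubleComplex*       __restrict__ c )  // the 16-element accumulator rC
{
    #pragma unroll
    for( int i = 0; i < 16; ++i ) {
        c[i] = MAGMA_Z_ADD( c[i], MAGMA_Z_MUL( a, b[i] ) );
    }
}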
821178cec2f8a9467d9202891513149d8fa2bbe7.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @precisions normal z -> c d s @author Peng Du @author Tingxing Dong @author Mark Gates This file implements lower case, and is called by ztrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "common_magma.h" #include "ztrtri.h" /* This inverts the diagonal IB by IB inner blocks of A, and stores the results in d_dinvA. Each thread block with IB threads does one inner block. Each thread deals with one row of the inner block. */ __global__ void ztrtri_diag_kernel_lower( magma_diag_t diag, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *d_dinvA) { int tx = threadIdx.x; int bx = blockIdx.x; int blk_ind = bx*IB; int ind = blk_ind + tx; A += blk_ind + blk_ind*lda; // A(blk_ind, blk_ind) // TODO sB should be [IB][IB+1] to avoid bank conflicts, right? __shared__ magmaDoubleComplex sB[IB*IB]; magmaDoubleComplex y_tx; // load lower triangle of inner block of A; zero upper triangle & outside matrix #pragma unroll for( int j=0; j < IB; j++ ) { if (tx >= j && ind < n) { sB[tx + j*IB] = A[tx + j*lda]; } else { sB[tx + j*IB] = MAGMA_Z_ZERO; } } __syncthreads(); // invert the diagonal if (diag == MagmaUnit) { sB[tx + tx*IB] = MAGMA_Z_ONE; } else { if ( sB[tx + tx*IB] == MAGMA_Z_ZERO ) { // singular or outside matrix sB[tx + tx*IB] = MAGMA_Z_ONE; } else { sB[tx + tx*IB] = MAGMA_Z_ONE / sB[tx + tx*IB]; } } // compute elements j+1:IB-1 of j-th column. for( int j=IB-2; j >= 0; j-- ) { if ( tx > j ) { // trmv: y = sB(j+1:IB-1, j+1:IB-1) * sB(j+1:IB-1, j) // each thread sums one element, y[tx] y_tx = MAGMA_Z_ZERO; #pragma unroll for( int k=j+1; k < IB; k++ ) y_tx += sB[tx + k*IB] * sB[k + j*IB]; // scal: sB(j+1:IB-1, j) = -sB(j,j) * y sB[tx + j*IB] = -sB[j + j*IB] * y_tx; } __syncthreads(); } // go to the (bx / ib_per_NB) outer NB*NB block, // then the (bx % ib_per_NB) inner IB*IB block inside that. int ib_per_NB = NB/IB; d_dinvA += (bx / ib_per_NB)*NB*NB + (bx % ib_per_NB)*(NB*IB + IB); // write result #pragma unroll for( int j=0; j < IB; j++ ) { d_dinvA[tx + j*NB] = sB[tx + j*IB]; } } /* Let A be an NB*NB lower triangular matrix, and B its inverse. Then the block decomposition [ A11 0 ] * [ B11 0 ] = [ I 0 ] [ A21 A22 ] [ B21 B22 ] [ 0 I ] yields A11*B11 = I ==> B11 = A11^{-1}, A22*B22 = I ==> B22 = A22^{-1}, A21*B11 + A22*B21 = 0 ==> B21 = -A22^{-1}*A21*B11 = -B22*A21*B11. ztrtri_diag_kernel inverts A11 and A22. triple_zgemm16 routines multiply: part 1: B21 = A21 * B11, part 2: B21 = -B22 * B21. At this level, inner block is jb=16, with one 4x4 thread block per inner block. Each submatrix Aij and Bij is jb x jb. The submatrix dimension is multiplied by 2 at each level, so the next level is jb*2 = 32. A "page" is the next bigger block, here jb*2=32, [ B11 0 ] which contains [ B21 B22 ]. Outer blocks are NB x NB. A21 may have < jb rows, but is guaranteed to have jb cols since A22 is on the right. This makes a single check easy to do. B is stored in workspace that is a full multiple of NB x NB; no checks needed. We split this into part1 & part2 to synchronize all blocks and make sure that writes to B12 are observed by all blocks. 
*/ /* * B21 = A21 * B11 */ __global__ void triple_zgemm16_part1_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { // emulate 3D grid: NX * (NY*npages) const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by*16; const int id = tx + ty*blockDim.x; const int ind = page*jb*2 + jb + ibx + id; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B21 = A21 * B11 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb; // A21 B = d_dinvA; // B11 C = d_dinvA + jb; // B21 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // TODO this won't coalesce, will it? unless NX=32 (or maybe 16 with doubles, or 8 with double-complex) // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 4 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); if ( ind < n ) { // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); } // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // TODO instead of writing result, copy it to sB and do part 2. // Would only work for jb=16, because only then does rC fit into sB. // If sB were [NT][16+], then rC would fit into sB. 
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B21 = -B22 * B21 */ __global__ void triple_zgemm16_part2_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by*16; const int id = tx + ty*blockDim.x; const int ind = page*jb*2 + jb + ibx + id; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B21 = -B22 * B21 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; // shadows lda argument int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb; // B21 B = C; // B21, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; // TODO factor this out: // gemm16<NX, NY> computes NT x 16 block of C: // C(1:nt, 1:16) = A(1:nt, 1:jb) * B(1:jb, 1:16) // where NT = NX * NY. // part 1: gemm16<4,4>( /*NT, 16,*/ jb, 1, A21, lda, B11, NB, /*0*/, B21, NB, n, ind, tx, ty ); // part 2: gemm16<4,4>( /*NT, 16,*/ jb, -1, B22, NB, B21, NB, /*0*/, B21, NB, n, ind, tx, ty ); // okay for C to overwrite B const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 4 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); if ( ind < n ) { // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); } // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B21 = A21 * B11 */ __global__ void triple_zgemm32_part1_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by*16; const int id = tx + ty*blockDim.x; const int ind = page*jb*2 + jb + ibx + id; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B21 = A21 * B11 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb; // A21 B = d_dinvA; // B11 C = d_dinvA + jb; // B21 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 8 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); if ( ind < n ) { // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); } // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B21 = -B22 * B21 */ __global__ void triple_zgemm32_part2_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by*16; const int id = tx + ty*blockDim.x; const int ind = page*jb*2 + jb + ibx + id; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B21 = -B22 * B21 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb; // B21 B = C; // B21, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 8 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); if ( ind < n ) { // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); } // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B21 = A21 * B11 */ __global__ void triple_zgemm64_part1_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; const int ind = page*jb*2 + jb + ibx + id; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B21 = A21 * B11 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb; // A21 B = d_dinvA; // B11 C = d_dinvA + jb; // B21 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); if ( ind < n ) { // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); } // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B21 = -B22 * B21 */ __global__ void triple_zgemm64_part2_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; const int ind = page*jb*2 + jb + ibx + id; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B21 = -B22 * B21 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb; // B21 B = C; // B21, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); if ( ind < n ) { // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); } // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B21 = A21 * B11 */ __global__ void triple_zgemm_above64_part1_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; const int ind = page*jb*2 + jb + ibx + id; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B21 = A21 * B11 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // For jb > 64, we process B21 as gridDim.x sections of 64 rows each, with gridDim.x > 1. // Each section needs all of the B matrix, so C cannot overwrite B. // Therefore, store B21 temporarily in the previously unused B12 matrix // (i.e., above diagonal), then in part 3, zero out B12. // // Kernels with jb <= 64 don't have this problem, because only the // NT x 16 section of C that overwrites the same section of B depends // on that section of B. // // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb; // A21 B = d_dinvA; // B11 C = d_dinvA + jb*NB; // B21; write to B12 temp location A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); if ( ind < n ) { // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); } // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B21 = -B22 * B21 */ __global__ void triple_zgemm_above64_part2_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; const int ind = page*jb*2 + jb + ibx + id; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B21 = -B22 * B21 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA + jb*NB + jb; // B22 B = d_dinvA + jb*NB; // B21, read from B12 temp location C = d_dinvA + jb; // B21 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); if ( ind < n ) { // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); } // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * zero out B12 temp location */ __global__ void triple_zgemm_above64_part3_lower( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part three---------------------------// { // zero out B12 temp location magmaDoubleComplex *B12; int ldb = NB; B12 = d_dinvA + jb*NB; B12 += ibx + id + iby*ldb; #pragma unroll for( int i = 0; i < 16; i++ ) { B12[i*ldb] = MAGMA_Z_ZERO; } } }
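The file header above derives B21 = -B22 * A21 * B11 and splits it into the part1/part2 kernel pairs. Purely as a reading aid (this is not MAGMA code), the sketch below spells out that per-page recurrence as a scalar host reference, using plain double instead of magmaDoubleComplex, contiguous jb x jb column-major blocks, no tiling or lda handling, and illustrative names.

// Host-side reference for the per-page update done by the triple_zgemm
// part1/part2 kernels above.  Illustrative only.
static void gemm_ref( int n, double alpha, const double* A, const double* B,
                      double beta, double* C )
{
    // C = alpha*A*B + beta*C, all n x n column-major
    for( int j = 0; j < n; ++j ) {
        for( int i = 0; i < n; ++i ) {
            double s = 0.0;
            for( int k = 0; k < n; ++k )
                s += A[i + k*n] * B[k + j*n];
            C[i + j*n] = alpha*s + beta*C[i + j*n];
        }
    }
}

static void trtri_page_ref( int jb, const double* A21,
                            const double* B11, const double* B22,
                            double* B21, double* work )
{
    // part 1: work = A21 * B11      (triple_zgemm*_part1_lower)
    gemm_ref( jb,  1.0, A21, B11, 0.0, work );
    // part 2: B21  = -B22 * work    (triple_zgemm*_part2_lower)
    gemm_ref( jb, -1.0, B22, work, 0.0, B21 );
}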
d16ee87c69aa5871e9947a94c8a1b023879d5345.hip
// !!! This is a file automatically generated by hipify!!!
#include <drivers/nesterov_driver.h>

#include <solvers/nesterov_sgd.h>

#include <core/errors.h>

#include <device/cuda_utils.h>
#include <device/gen_random.h>
#include <device/device_defines.h>
#include <device/handles.h>

#include <functions/dev_initializations.h>

#include <utilities/print_utils.h>

#include <limits.h>
#include <stdlib.h>
#include <stdio.h>
#include <float.h>

void initNesterovParams( NESTEROV_PARAMS *params, int n )
{
	//sampled_tr_cg.m file.
	params->step = 0.001;     //learning rate
	params->momentum = 0.9;   //eps
	params->lambda = 0;

	params->maxProps = ULONG_MAX;
	params->maxEpochs = 20;
	params->sampleSize = floor( 256 );
}

void testNesterov( NN_MODEL *model, DEVICE_DATASET *data, SCRATCH_AREA *scratch )
{
	NESTEROV_PARAMS mParams;

	//begin here
	fprintf( stderr, "Initiating the Nesterov Test now..... \n\n\n");

	initNesterovParams( &mParams, data->trainSizeX );
	fprintf( stderr, "... Done params initialization \n\n");

	//init weights to ZEROS
	cuda_memset( data->weights, 0, sizeof(real) * model->pSize, ERROR_MEMSET );

	//init weights to Random Vector
	/*
	getRandomVector( model->pSize, NULL, scratch->nextDevPtr, RAND_NORMAL );
	copy_device( data->weights, scratch->nextDevPtr, sizeof(real) * model->pSize,
			ERROR_MEMCPY_DEVICE_DEVICE );
	real scale = 0.25;
	cublasCheckError( hipblasDscal( cublasHandle, model->pSize, &scale, data->weights, 1 ));
	*/

	nesterov_sgd( model, data, scratch, &mParams );

	fprintf( stderr, ".... Done testing of Nesterov \n\n\n" );
}
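The solver called by this driver lives in solvers/nesterov_sgd.h and is not part of this dump, so the meaning of step, momentum and lambda has to be inferred. The kernel below is only a sketch of one common Nesterov update those parameters could feed (the "lookahead" formulation); it is not the project's implementation, and every name other than the parameter names is made up.

// Sketch only -- a typical Nesterov momentum step for the (step, momentum,
// lambda) parameters set in initNesterovParams above.  "real" is the same
// scalar typedef the driver uses; w = weights, v = velocity, grad = gradient.
__global__ void nesterov_update_sketch( real *w, real *v, const real *grad,
                                        real step, real momentum, real lambda,
                                        int n )
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if( i < n ) {
        real g      = grad[i] + lambda * w[i];       // optional L2 term
        real v_prev = v[i];
        v[i]  = momentum * v[i] - step * g;          // velocity update
        // lookahead form of the parameter update
        w[i] += -momentum * v_prev + (1 + momentum) * v[i];
    }
}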
d16ee87c69aa5871e9947a94c8a1b023879d5345.cu
#include <drivers/nesterov_driver.h>

#include <solvers/nesterov_sgd.h>

#include <core/errors.h>

#include <device/cuda_utils.h>
#include <device/gen_random.h>
#include <device/device_defines.h>
#include <device/handles.h>

#include <functions/dev_initializations.h>

#include <utilities/print_utils.h>

#include <limits.h>
#include <stdlib.h>
#include <stdio.h>
#include <float.h>

void initNesterovParams( NESTEROV_PARAMS *params, int n )
{
	//sampled_tr_cg.m file.
	params->step = 0.001;     //learning rate
	params->momentum = 0.9;   //eps
	params->lambda = 0;

	params->maxProps = ULONG_MAX;
	params->maxEpochs = 20;
	params->sampleSize = floor( 256 );
}

void testNesterov( NN_MODEL *model, DEVICE_DATASET *data, SCRATCH_AREA *scratch )
{
	NESTEROV_PARAMS mParams;

	//begin here
	fprintf( stderr, "Initiating the Nesterov Test now..... \n\n\n");

	initNesterovParams( &mParams, data->trainSizeX );
	fprintf( stderr, "... Done params initialization \n\n");

	//init weights to ZEROS
	cuda_memset( data->weights, 0, sizeof(real) * model->pSize, ERROR_MEMSET );

	//init weights to Random Vector
	/*
	getRandomVector( model->pSize, NULL, scratch->nextDevPtr, RAND_NORMAL );
	copy_device( data->weights, scratch->nextDevPtr, sizeof(real) * model->pSize,
			ERROR_MEMCPY_DEVICE_DEVICE );
	real scale = 0.25;
	cublasCheckError( cublasDscal( cublasHandle, model->pSize, &scale, data->weights, 1 ));
	*/

	nesterov_sgd( model, data, scratch, &mParams );

	fprintf( stderr, ".... Done testing of Nesterov \n\n\n" );
}
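The commented-out random-initialization branch above relies on the project's getRandomVector helper, which is not shown in this dump. Purely as an illustration of that path, the sketch below does the equivalent with the stock cuRAND host API and a locally created cuBLAS handle; it is not the project's code, and the seed, generator choice and function name are arbitrary.

// Sketch only: fill a device weight vector with N(0,1) samples and scale it,
// mirroring the commented-out getRandomVector + cublasDscal path above.
#include <curand.h>
#include <cublas_v2.h>

static void init_weights_normal_sketch( double *d_weights, size_t n, double scale )
{
    curandGenerator_t gen;
    curandCreateGenerator( &gen, CURAND_RNG_PSEUDO_DEFAULT );
    curandSetPseudoRandomGeneratorSeed( gen, 1234ULL );
    curandGenerateNormalDouble( gen, d_weights, n, 0.0, 1.0 );  // n should be even for normal generation
    curandDestroyGenerator( gen );

    cublasHandle_t h;
    cublasCreate( &h );
    cublasDscal( h, (int) n, &scale, d_weights, 1 );
    cublasDestroy( h );
}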
5817447dbd77787d6f27ffaacb85323c85517e80.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "io.h"
#include "layer.h"

#include <rocblas.h>

/* io */
const int trsize = 60000;

/* net */
const int features = 784;
const int hidden = 1000;
const int classes = 10;

const int train_iter = 1000;
const int batch_size = 256;

const float reg = 0.5f;
const float lrate = 0.001f;
const float regularization = 1.0f - ((lrate * reg) / trsize);

/* gpu */
const int ntpb = 512;

const float neg = -1.0f;
const float alph = 1.0f;
const float bet = 0.0f;

__device__ float drelu(float x)
{
	return x > 0.0f ? 1.0f : 0.0f;
}

__global__ void gpu_relu(const float *__restrict__ A, float *__restrict__ B, int N)
{
	int idx = threadIdx.x + blockDim.x * blockIdx.x;
	if(idx < N)
		B[idx] = fmaxf(0.0f, A[idx]);
}

__global__ void gpu_drelu(float *A, int N)
{
	int idx = threadIdx.x + blockDim.x * blockIdx.x;
	if(idx < N)
		A[idx] = drelu(A[idx]);
}

__global__ void gpu_exp(float *A, int N)
{
	int idx = threadIdx.x + blockDim.x * blockIdx.x;
	if(idx < N)
		A[idx] = exp(A[idx]);
}

__global__ void gpu_subscal(const float *__restrict__ A, float *__restrict__ B, float x, int N)
{
	int idx = threadIdx.x + blockDim.x * blockIdx.x;
	if(idx < N)
		B[idx] = A[idx] - x;
}

__global__ void gpu_sm_normalize(const float *__restrict__ A, float *__restrict__ B, int N)
{
	int idx = threadIdx.x + blockDim.x * blockIdx.x;
	if(idx < N) {
		int midx = 0;
		hipblasHandle_t h;
		hipblasCreate(&h);
		hipblasIsamax(h, classes, A + idx * classes, 1, &midx);
		hipLaunchKernelGGL(( gpu_subscal), dim3((classes+ntpb-1)/ntpb),dim3(ntpb), 0, 0, 
			A + idx * classes,
			B + idx * classes,
			*(A + (midx - 1) + idx * classes),
			classes
		);
		hipblasDestroy(h);
	}
}

__global__ void gpu_sm_expscal(float *A, int N)
{
	int idx = threadIdx.x + blockDim.x * blockIdx.x;
	if(idx < N) {
		float sum = 0.0f;
		hipblasHandle_t h;
		hipblasCreate(&h);
		hipblasSasum(h, classes, A + idx * classes, 1, &sum);
		sum = 1.0f / sum;
		hipblasSscal(h, classes, &sum, A + idx * classes, 1);
		hipblasDestroy(h);
	}
}

int main(void)
{
	srand(time(NULL));

	FILE *trf;
	trf = fopen("mnist_train.csv", "r");

	struct iod *io = iod_create(features, classes, trsize);
	struct layer *hid = layer_create(hidden, features, batch_size);
	struct layer *out = layer_create(classes, hidden, batch_size);

	hipblasHandle_t handle;
	hipblasCreate(&handle);

	float *dtmp;
	hipMalloc((void **)&dtmp, sizeof(float) * hidden * batch_size);

	iod_parse(io, trf);

	/* training loop */
	unsigned stepy = 0;
	for(int e = 0; e < train_iter; ++e) {
		/* choose random start for mini batch submatrix */
		stepy = ((trsize - batch_size) + 1) * (float)rand()/(float)RAND_MAX;

		/* forward propagation */
		hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, hidden, batch_size, features,
				&alph, hid->w, hidden, io->i + stepy * features, features,
				&bet, hid->wi, hidden);
		hipblasSgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, hidden, batch_size,
				&alph, hid->wi, hidden, &alph, hid->b, hidden, hid->wi, hidden);
		hipLaunchKernelGGL(( gpu_relu), dim3((hidden*batch_size+ntpb-1)/ntpb),dim3(ntpb), 0, 0, hid->wi, hid->a, hidden * batch_size);

		hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, classes, batch_size, hidden,
				&alph, out->w, classes, hid->a, hidden, &bet, out->wi, classes);
		hipblasSgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, classes, batch_size,
				&alph, out->wi, classes, &alph, out->b, classes, out->wi, classes);
		hipLaunchKernelGGL(( gpu_sm_normalize), dim3((batch_size+ntpb-1)/ntpb),dim3(ntpb), 0, 0, out->wi, out->a, batch_size);
		hipLaunchKernelGGL(( gpu_exp), dim3((classes*batch_size+ntpb-1)/ntpb),dim3(ntpb), 0, 0, out->a, classes * batch_size);
		hipLaunchKernelGGL(( gpu_sm_expscal), dim3((batch_size+ntpb-1)/ntpb),dim3(ntpb), 0, 0, out->a, batch_size);

		/* backpropagation */
		hipblasSgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, classes, batch_size,
				&alph, out->a, classes, &neg, io->t + stepy * classes, classes,
				out->bu, classes);
		hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, classes, hidden, batch_size,
				&alph, out->bu, classes, hid->a, hidden, &bet, out->wu, classes);
		hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, hidden, batch_size, classes,
				&alph, out->w, classes, out->bu, classes, &bet, hid->bu, hidden);
		hipLaunchKernelGGL(( gpu_drelu), dim3((hidden*batch_size+ntpb-1)/ntpb),dim3(ntpb), 0, 0, hid->wi, hidden);
		hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, hidden, batch_size, hidden,
				&alph, hid->bu, hidden, hid->wi, hidden, &bet, dtmp, hidden);
		hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, hidden, features, batch_size,
				&alph, dtmp, hidden, io->i + stepy * features, features,
				&bet, hid->wu, hidden);

		/* scale gradients */
		hipblasSscal(handle, classes * batch_size, &lrate, out->bu, 1);
		hipblasSscal(handle, classes * hidden, &lrate, out->wu, 1);
		hipblasSscal(handle, hidden * batch_size, &lrate, hid->bu, 1);
		hipblasSscal(handle, hidden * features, &lrate, hid->wu, 1);

		/* regularize weights */
		hipblasSscal(handle, classes * hidden, &regularization, out->w, 1);
		hipblasSscal(handle, hidden * features, &regularization, hid->w, 1);

		/* update weights */
		hipblasSgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, classes, batch_size,
				&alph, out->b, classes, &neg, out->bu, classes, out->b, classes);
		hipblasSgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, classes, hidden,
				&alph, out->w, classes, &neg, out->wu, classes, out->w, classes);
		hipblasSgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, hidden, batch_size,
				&alph, hid->b, hidden, &neg, hid->bu, hidden, hid->b, hidden);
		hipblasSgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, hidden, features,
				&alph, hid->w, hidden, &neg, hid->wu, hidden, hid->w, hidden);
	}

	layer_destroy(hid);
	layer_destroy(out);
	iod_destroy(io);
	hipFree(dtmp);
	hipblasDestroy(handle);
}
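Taken together, gpu_sm_normalize, gpu_exp and gpu_sm_expscal apply a numerically stable softmax to each column of the classes x batch_size output matrix: subtract the column maximum, exponentiate, then divide by the column sum. The host loop below is only a reference for that combined effect, not code from this project.

/* Reference only: what the three softmax kernels above compute per sample. */
#include <math.h>

static void softmax_columns_ref( const float *in, float *out, int classes, int batch )
{
    for( int b = 0; b < batch; ++b ) {
        const float *x = in  + b * classes;
        float       *y = out + b * classes;

        float m = x[0];                       /* column maximum (gpu_sm_normalize) */
        for( int c = 1; c < classes; ++c )
            if( x[c] > m ) m = x[c];

        float sum = 0.0f;
        for( int c = 0; c < classes; ++c ) {  /* shifted exponentials (gpu_subscal + gpu_exp) */
            y[c] = expf( x[c] - m );
            sum += y[c];
        }
        for( int c = 0; c < classes; ++c )    /* normalize the column (gpu_sm_expscal) */
            y[c] /= sum;
    }
}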
5817447dbd77787d6f27ffaacb85323c85517e80.cu
#include "io.h" #include "layer.h" #include <cublas_v2.h> /* io */ const int trsize = 60000; /* net */ const int features = 784; const int hidden = 1000; const int classes = 10; const int train_iter = 1000; const int batch_size = 256; const float reg = 0.5f; const float lrate = 0.001f; const float regularization = 1.0f - ((lrate * reg) / trsize); /* gpu */ const int ntpb = 512; const float neg = -1.0f; const float alph = 1.0f; const float bet = 0.0f; __device__ float drelu(float x) { return x > 0.0f ? 1.0f : 0.0f; } __global__ void gpu_relu(const float *__restrict__ A, float *__restrict__ B, int N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx < N) B[idx] = fmaxf(0.0f, A[idx]); } __global__ void gpu_drelu(float *A, int N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx < N) A[idx] = drelu(A[idx]); } __global__ void gpu_exp(float *A, int N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx < N) A[idx] = exp(A[idx]); } __global__ void gpu_subscal(const float *__restrict__ A, float *__restrict__ B, float x, int N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx < N) B[idx] = A[idx] - x; } __global__ void gpu_sm_normalize(const float *__restrict__ A, float *__restrict__ B, int N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx < N) { int midx = 0; cublasHandle_t h; cublasCreate(&h); cublasIsamax(h, classes, A + idx * classes, 1, &midx); gpu_subscal<<<(classes+ntpb-1)/ntpb,ntpb>>>( A + idx * classes, B + idx * classes, *(A + (midx - 1) + idx * classes), classes ); cublasDestroy(h); } } __global__ void gpu_sm_expscal(float *A, int N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx < N) { float sum = 0.0f; cublasHandle_t h; cublasCreate(&h); cublasSasum(h, classes, A + idx * classes, 1, &sum); sum = 1.0f / sum; cublasSscal(h, classes, &sum, A + idx * classes, 1); cublasDestroy(h); } } int main(void) { srand(time(NULL)); FILE *trf; trf = fopen("mnist_train.csv", "r"); struct iod *io = iod_create(features, classes, trsize); struct layer *hid = layer_create(hidden, features, batch_size); struct layer *out = layer_create(classes, hidden, batch_size); cublasHandle_t handle; cublasCreate(&handle); float *dtmp; cudaMalloc((void **)&dtmp, sizeof(float) * hidden * batch_size); iod_parse(io, trf); /* training loop */ unsigned stepy = 0; for(int e = 0; e < train_iter; ++e) { /* choose random start for mini batch submatrix */ stepy = ((trsize - batch_size) + 1) * (float)rand()/(float)RAND_MAX; /* forward propagation */ cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, hidden, batch_size, features, &alph, hid->w, hidden, io->i + stepy * features, features, &bet, hid->wi, hidden); cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, hidden, batch_size, &alph, hid->wi, hidden, &alph, hid->b, hidden, hid->wi, hidden); gpu_relu<<<(hidden*batch_size+ntpb-1)/ntpb,ntpb>>>(hid->wi, hid->a, hidden * batch_size); cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, classes, batch_size, hidden, &alph, out->w, classes, hid->a, hidden, &bet, out->wi, classes); cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, classes, batch_size, &alph, out->wi, classes, &alph, out->b, classes, out->wi, classes); gpu_sm_normalize<<<(batch_size+ntpb-1)/ntpb,ntpb>>>(out->wi, out->a, batch_size); gpu_exp<<<(classes*batch_size+ntpb-1)/ntpb,ntpb>>>(out->a, classes * batch_size); gpu_sm_expscal<<<(batch_size+ntpb-1)/ntpb,ntpb>>>(out->a, batch_size); /* backpropagation */ cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, classes, batch_size, &alph, out->a, classes, &neg, io->t + stepy * classes, classes, 
out->bu, classes); cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, classes, hidden, batch_size, &alph, out->bu, classes, hid->a, hidden, &bet, out->wu, classes); cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, hidden, batch_size, classes, &alph, out->w, classes, out->bu, classes, &bet, hid->bu, hidden); gpu_drelu<<<(hidden*batch_size+ntpb-1)/ntpb,ntpb>>>(hid->wi, hidden); cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, hidden, batch_size, hidden, &alph, hid->bu, hidden, hid->wi, hidden, &bet, dtmp, hidden); cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, hidden, features, batch_size, &alph, dtmp, hidden, io->i + stepy * features, features, &bet, hid->wu, hidden); /* scale gradients */ cublasSscal(handle, classes * batch_size, &lrate, out->bu, 1); cublasSscal(handle, classes * hidden, &lrate, out->wu, 1); cublasSscal(handle, hidden * batch_size, &lrate, hid->bu, 1); cublasSscal(handle, hidden * features, &lrate, hid->wu, 1); /* regularize weights */ cublasSscal(handle, classes * hidden, &regularization, out->w, 1); cublasSscal(handle, hidden * features, &regularization, hid->w, 1); /* update weights */ cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, classes, batch_size, &alph, out->b, classes, &neg, out->bu, classes, out->b, classes); cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, classes, hidden, &alph, out->w, classes, &neg, out->wu, classes, out->w, classes); cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, hidden, batch_size, &alph, hid->b, hidden, &neg, hid->bu, hidden, hid->b, hidden); cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, hidden, features, &alph, hid->w, hidden, &neg, hid->wu, hidden, hid->w, hidden); } layer_destroy(hid); layer_destroy(out); iod_destroy(io); cudaFree(dtmp); cublasDestroy(handle); }
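The "regularize weights" step in the loop above is plain L2 weight decay: scaling W by regularization = 1 - (lrate*reg)/trsize before subtracting the already lrate-scaled gradient is the same as adding a (reg/(2*trsize))*||W||^2 penalty to the loss and folding its gradient into the step (note the code uses the full training-set size trsize, not the batch size, in the denominator). The scalar snippet below only illustrates that equivalence; it is not project code.

/* Illustration only: the decay-then-update step used above, for one weight.
   Both lines compute the same value. */
static float decayed_update( float w, float grad, float lrate, float reg, float n )
{
    float combined   = w * (1.0f - (lrate * reg) / n) - lrate * grad;   /* as in the loop */
    float equivalent = w - lrate * (grad + (reg / n) * w);              /* explicit L2 gradient */
    (void) equivalent;  /* identical up to rounding */
    return combined;
}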
25f3039226f9b60426ffbaaaa8a3e94232c69aec.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/GpuResources.h> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/impl/AuxIndexStructures.h> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/impl/BroadcastSum.cuh> #include <faiss/gpu/impl/BroadcastSumBurst.cuh> #include <faiss/gpu/impl/BurstPatchDistance.cuh> #include <faiss/gpu/impl/DistanceUtils.cuh> #include <faiss/gpu/impl/L2Norm.cuh> #include <faiss/gpu/impl/BurstNnfL2Norm.cuh> #include <faiss/gpu/impl/L2Select.cuh> #include <faiss/gpu/utils/BurstBlockSelectKernel.cuh> #include <faiss/gpu/utils/DeviceDefs.cuh> #include <faiss/gpu/utils/Limits.cuh> #include <faiss/gpu/utils/MatrixMult.cuh> #include <faiss/gpu/utils/BurstNnfSimpleBlockSelect.cuh> #include <faiss/gpu/utils/BlockIndices2Labels.cuh> #include <cstdio> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/for_each.h> #include <algorithm> #include <memory> namespace faiss { namespace gpu { template <typename T> void runBurstPatchDistance( GpuResources* res, hipStream_t stream, Tensor<T, 4, true>& burst, Tensor<int, 3, true>& blockLabels, int k, int t, int h, int w, int c, int patchsize, int nblocks, float valMean, Tensor<float, 3, true>& outDistances, Tensor<int, 5, true>& outIndices, bool computeL2) { // Size of proposed image auto nframes = burst.getSize(0); auto nftrs = burst.getSize(1); auto heightPad = burst.getSize(2); auto widthPad = burst.getSize(3); int nblocks_total = blockLabels.getSize(1); int nblocks2 = nblocks*nblocks; int pad = ::floor(patchsize/2) + ::floor(nblocks/2); int psHalf = ::floor(patchsize/2); constexpr int nstreams = 16; // Size of vals image auto height = outDistances.getSize(0); auto width = outDistances.getSize(1); auto kOut = outDistances.getSize(2); // Size of indices image auto nframes_outInd = outIndices.getSize(0); auto heightInd = outIndices.getSize(1); auto widthInd = outIndices.getSize(2); auto kOutInd = outIndices.getSize(3); auto two = outIndices.getSize(4); // Assert same size FAISS_ASSERT(nframes == nframes_outInd); FAISS_ASSERT(height == (heightPad-2*pad)); FAISS_ASSERT(width == (widthPad-2*pad)); FAISS_ASSERT(height == heightInd); FAISS_ASSERT(width == widthInd); FAISS_ASSERT(kOut == k); FAISS_ASSERT(kOutInd == k); FAISS_ASSERT(two == 2); // FAISS_ASSERT(nblocks_total == utils::pow(nblocks2,nframes-1)); // init for comparison right now, to be removed. // thrust::fill(thrust::hip::par.on(stream), // outDistances.data(), // outDistances.end(), // Limits<float>::getMax()); // If we're querying against a 0 sized set, just return empty results if (height == 0 || width == 0 || nftrs == 0) { thrust::fill( thrust::hip::par.on(stream), outDistances.data(), outDistances.end(), Limits<float>::getMax()); thrust::fill( thrust::hip::par.on(stream), outIndices.data(), outIndices.end(), -1); return; } // By default, aim to use up to 512 MB of memory for the processing, with // both number of queries and number of centroids being at least 512. 
int tileHeight = 0; // batchsize across height int tileWidth = 0; // batchsize across width int tileBlocks = 0; // batchsize across blocks chooseImageTileSize( height, // image height width, // image width nftrs, // num of features per pixel patchsize, // patchsize nblocks_total, // number of image blocks to search sizeof(T), res->getTempMemoryAvailableCurrentDevice(), tileHeight, tileWidth, tileBlocks); // tileBlocks = 128; int numHeightTiles = utils::divUp(height, tileHeight); int numWidthTiles = utils::divUp(width, tileWidth); int numBlockTiles = utils::divUp((nblocks_total), tileBlocks); // printf("(tileHeight,tileWidth,tileBlocks): (%d,%d,%d)\n", // tileHeight,tileWidth,tileBlocks); // We can have any number of vectors to query against, even less than k, in // which case we'll return -1 for the index FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); // select limitation // // Temporary memory space to *execute* a single batch // DeviceTensor<float, 3, true> distanceBuf_1(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_2(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_3(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_4(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_5(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_6(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_7(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_8(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_9(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_10(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_11(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_12(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_13(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_14(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_15(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_16(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true>* distanceBufs[16] = {&distanceBuf_1, &distanceBuf_2, &distanceBuf_3, &distanceBuf_4, &distanceBuf_5, &distanceBuf_6, &distanceBuf_7, &distanceBuf_8, &distanceBuf_9, &distanceBuf_10, &distanceBuf_11, &distanceBuf_12, &distanceBuf_13, &distanceBuf_14, &distanceBuf_15, &distanceBuf_16}; // std::vector<DeviceTensor<float, 3, true>> distanceBufs(nstreams, // DeviceTensor<float, 3, true>(res, // makeTempAlloc(AllocType::Other, stream), // {tileHeight, tileWidth, tileBlocks})); // DeviceTensor<float, 3, true>** distanceBufs = new DeviceTensor<float, 
3, true>*[nstreams]; // std::vector<DeviceTensor<float, 3, true>> distanceBufs; // distanceBufs.resize(nstreams); // #pragma unroll // for (int i = 0; i < nstreams; ++i){ // auto distBuf_i = new DeviceTensor<float, 3, true>(res, // makeTempAlloc(AllocType::Other, stream), // {tileHeight, tileWidth, tileBlocks}); // distanceBufs[i] = distBuf_i; // // distanceBufs.push_back(distBuf_i); // } // for (int i = 0; i < nstreams; ++i){ // for (int j = 0; j < distanceBufs[i].NumDim; ++j){ // printf("[%d]: getSize(%d): %d\n",i,j,distanceBufs[i].getSize(j)); // } // //isContiguous // } DeviceTensor<int, 3, true> indexingBuf_1(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_2(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_3(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_4(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_5(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_6(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_7(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_8(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_9(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_10(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_11(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_12(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_13(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_14(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_15(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_16(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true>* indexingBufs[16] = {&indexingBuf_1, &indexingBuf_2, &indexingBuf_3, &indexingBuf_4, &indexingBuf_5, &indexingBuf_6, &indexingBuf_7, &indexingBuf_8, &indexingBuf_9, &indexingBuf_10, &indexingBuf_11, &indexingBuf_12, &indexingBuf_13, &indexingBuf_14, &indexingBuf_15, &indexingBuf_16,}; // // Temporary memory space to *ave* a single batch of images // int tileHeightPad = tileHeight + 2*psHalf; int tileWidthPad = tileWidth + 2*psHalf; DeviceTensor<T, 4, true> aveBuf_1(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_2(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_3(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_4(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_5(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, 
tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_6(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_7(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_8(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_9(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_10(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_11(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_12(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_13(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_14(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_15(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_16(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true>* aveBufs[16]; aveBufs[0] = &aveBuf_1; aveBufs[1] = &aveBuf_2; aveBufs[2] = &aveBuf_3; aveBufs[3] = &aveBuf_4; aveBufs[4] = &aveBuf_5; aveBufs[5] = &aveBuf_6; aveBufs[6] = &aveBuf_7; aveBufs[7] = &aveBuf_8; aveBufs[8] = &aveBuf_9; aveBufs[9] = &aveBuf_10; aveBufs[10] = &aveBuf_11; aveBufs[11] = &aveBuf_12; aveBufs[12] = &aveBuf_13; aveBufs[13] = &aveBuf_14; aveBufs[14] = &aveBuf_15; aveBufs[15] = &aveBuf_16; // DeviceTensor<T, 4, true>* aveBufs[2]; // std::vector<DeviceTensor<T, 4, true>> aveBufsVec; // aveBufsVec.reserve(2); // #pragma unroll // for (int i = 0; i < 2; ++i){ // DeviceTensor<T, 4, true> aveBuf_i(res, // makeTempAlloc(AllocType::Other, stream), // {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); // aveBufs.push_back(aveBuf_i); // aveBufsVec.push_back(DeviceTensor<T, 4, true>(res, // makeTempAlloc(AllocType::Other, stream), // {nftrs, tileBlocks, tileHeightPad, tileWidthPad})); // aveBufs[i] = &aveBuf_i; // aveBufs[i] = new DeviceTensor<T, 4, true> (res, // makeTempAlloc(AllocType::Other, stream), // {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); // } // DeviceTensor<T, 4, true>* aveBufs[nstreams] = {}; // for (int i = 0; i < nstreams; ++i){ // auto aveBuf_i = DeviceTensor<T, 4, true>(res, // makeTempAlloc(AllocType::Other, stream), // {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); // res->syncDefaultStreamCurrentDevice(); // aveBufs[i] = &aveBuf_i; // } // Streams allow for concurrent kernel execs. 
auto streams = res->getAlternateStreamsCurrentDevice(); streamWait(streams, {stream}); int curStream = 0; bool interrupt = false; // Tile HEIGHT pixels for (int i = 0; i < height; i += tileHeight) { if (interrupt || InterruptCallback::is_interrupted()) { interrupt = true; break; } // create indices for height tiling int curHeightSize = ::min(tileHeight, height - i); int paddedWidthSize = tileWidth + 2*(pad); int paddedHeightSize = tileHeight + 2*(pad); paddedHeightSize = ::min(paddedHeightSize,heightPad - i); // create views from height tile auto outDistanceHeightView = outDistances.narrow(0, i, curHeightSize); auto outIndexHeightView = outIndices.narrow(1, i, curHeightSize); auto burstHeightView = burst.narrow(2, i, paddedHeightSize); // Tile WIDTH pixels for (int j = 0; j < width; j += tileWidth) { if (InterruptCallback::is_interrupted()) { interrupt = true; break; } // create indices for height tiling int curWidthSize = ::min(tileWidth, width - j); int paddedWidthSize = tileWidth + 2*(pad); paddedWidthSize = ::min(paddedWidthSize,widthPad - j); // view from width tiling auto outDistanceView = outDistanceHeightView.narrow(1, j, curWidthSize); auto outIndexView = outIndexHeightView.narrow(2, j, curWidthSize); auto burstView = burstHeightView.narrow(3, j, paddedWidthSize); for (int blk = 0; blk < nblocks_total; blk += tileBlocks) { if (InterruptCallback::is_interrupted()) { interrupt = true; break; } // // View for Buffers // auto curBlockSize = ::min(tileBlocks, nblocks_total - blk); // printf("(curHeightSize,curWidthSize,curBlockSize): (%d,%d,%d)\n", // curHeightSize,curWidthSize,curBlockSize); auto aveView = aveBufs[curStream] ->narrow(1, 0, curBlockSize) .narrow(2, 0, curHeightSize+2*psHalf) .narrow(3, 0, curWidthSize+2*psHalf); auto distanceBufView = distanceBufs[curStream] ->narrow(0, 0, curHeightSize) .narrow(1, 0, curWidthSize) .narrow(2, 0, curBlockSize); // // View for Blocks // auto blockLabelView = blockLabels.narrow(1, blk, curBlockSize); // // Compute Average // runBurstAverage(burstView,blockLabelView, aveView,patchsize,nblocks, streams[curStream]); // thrust::fill(thrust::hip::par.on(stream), // aveView.data(), // aveView.end(), // 0.); // // Execute Template Search // runBurstNnfL2Norm(burstView,aveView, blockLabelView, distanceBufView, // outDistanceView, patchsize,nblocks,true, streams[curStream]); // // Top K Selection // // select "topK" from "curBlockSize" of outDistances // this "topK" selection is limited to a "curBlockSize" batch // runBurstNnfSimpleBlockSelect(distanceBufView, blockLabelView, outDistanceView, outIndexView, valMean, false,k,streams[curStream]); // auto indexingBuf = indexingBufs[curStream] // ->narrow(0,0,curHeightSize) // .narrow(1,0,curWidthSize); // runBurstBlockSelect(distanceBufView, // // blockLabelView, // outDistanceView, // indexingBuf, // //outIndexView, // // valMean, // false,k,streams[curStream]); // runBlockIndices2Labels(indexingBuf, // outIndexView, // blockLabelView, // streams[curStream]); } // batching over blockTiles // // Top K Selection: Compare across Inputs & Outputs (e.g. 
"Pairs") // // runBurstBlockSelectPairs(distanceBufView, // // blockLabelView, // outDistanceView, // indexingBuf, // //outIndexView, // // valMean, // false,k,streams[curStream]); // // Convert topK "BlockLabel INDICES" to "BlockLabel VALS" // // convertLocs2Blocks(indexBuf curStream = (curStream + 1) % nstreams; } // batching over widthTiles } // batching over heightTiles // Have the desired ordering stream wait on the multi-stream streamWait({stream}, streams); if (interrupt) { FAISS_THROW_MSG("interrupted"); } } void runBurstPatchDistance( GpuResources* res, hipStream_t stream, Tensor<float, 4, true>& burst, Tensor<int, 3, true>& blockLabels, int k, int t, int h, int w, int c, int patchsize, int nblocks, float valMean, Tensor<float, 3, true>& outDistances, Tensor<int, 5, true>& outIndices, bool computeL2){ runBurstPatchDistance<float>( res, stream, burst, blockLabels, k,t,h,w,c, patchsize, nblocks, valMean, outDistances, outIndices, computeL2); } void runBurstPatchDistance( GpuResources* res, hipStream_t stream, Tensor<half, 4, true>& burst, Tensor<int, 3, true>& blockLabels, int k, int t, int h, int w, int c, int patchsize, int nblocks, float valMean, Tensor<float, 3, true>& outDistances, Tensor<int, 5, true>& outIndices, bool computeL2){ runBurstPatchDistance<half>( res, stream, burst, blockLabels, k,t,h,w,c, patchsize, nblocks, valMean, outDistances, outIndices, computeL2); } } // end namespace gpu } // end namespace faiss
25f3039226f9b60426ffbaaaa8a3e94232c69aec.cu
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/GpuResources.h> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/impl/AuxIndexStructures.h> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/impl/BroadcastSum.cuh> #include <faiss/gpu/impl/BroadcastSumBurst.cuh> #include <faiss/gpu/impl/BurstPatchDistance.cuh> #include <faiss/gpu/impl/DistanceUtils.cuh> #include <faiss/gpu/impl/L2Norm.cuh> #include <faiss/gpu/impl/BurstNnfL2Norm.cuh> #include <faiss/gpu/impl/L2Select.cuh> #include <faiss/gpu/utils/BurstBlockSelectKernel.cuh> #include <faiss/gpu/utils/DeviceDefs.cuh> #include <faiss/gpu/utils/Limits.cuh> #include <faiss/gpu/utils/MatrixMult.cuh> #include <faiss/gpu/utils/BurstNnfSimpleBlockSelect.cuh> #include <faiss/gpu/utils/BlockIndices2Labels.cuh> #include <cstdio> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/for_each.h> #include <algorithm> #include <memory> namespace faiss { namespace gpu { template <typename T> void runBurstPatchDistance( GpuResources* res, cudaStream_t stream, Tensor<T, 4, true>& burst, Tensor<int, 3, true>& blockLabels, int k, int t, int h, int w, int c, int patchsize, int nblocks, float valMean, Tensor<float, 3, true>& outDistances, Tensor<int, 5, true>& outIndices, bool computeL2) { // Size of proposed image auto nframes = burst.getSize(0); auto nftrs = burst.getSize(1); auto heightPad = burst.getSize(2); auto widthPad = burst.getSize(3); int nblocks_total = blockLabels.getSize(1); int nblocks2 = nblocks*nblocks; int pad = std::floor(patchsize/2) + std::floor(nblocks/2); int psHalf = std::floor(patchsize/2); constexpr int nstreams = 16; // Size of vals image auto height = outDistances.getSize(0); auto width = outDistances.getSize(1); auto kOut = outDistances.getSize(2); // Size of indices image auto nframes_outInd = outIndices.getSize(0); auto heightInd = outIndices.getSize(1); auto widthInd = outIndices.getSize(2); auto kOutInd = outIndices.getSize(3); auto two = outIndices.getSize(4); // Assert same size FAISS_ASSERT(nframes == nframes_outInd); FAISS_ASSERT(height == (heightPad-2*pad)); FAISS_ASSERT(width == (widthPad-2*pad)); FAISS_ASSERT(height == heightInd); FAISS_ASSERT(width == widthInd); FAISS_ASSERT(kOut == k); FAISS_ASSERT(kOutInd == k); FAISS_ASSERT(two == 2); // FAISS_ASSERT(nblocks_total == utils::pow(nblocks2,nframes-1)); // init for comparison right now, to be removed. // thrust::fill(thrust::cuda::par.on(stream), // outDistances.data(), // outDistances.end(), // Limits<float>::getMax()); // If we're querying against a 0 sized set, just return empty results if (height == 0 || width == 0 || nftrs == 0) { thrust::fill( thrust::cuda::par.on(stream), outDistances.data(), outDistances.end(), Limits<float>::getMax()); thrust::fill( thrust::cuda::par.on(stream), outIndices.data(), outIndices.end(), -1); return; } // By default, aim to use up to 512 MB of memory for the processing, with // both number of queries and number of centroids being at least 512. 
int tileHeight = 0; // batchsize across height int tileWidth = 0; // batchsize across width int tileBlocks = 0; // batchsize across blocks chooseImageTileSize( height, // image height width, // image width nftrs, // num of features per pixel patchsize, // patchsize nblocks_total, // number of image blocks to search sizeof(T), res->getTempMemoryAvailableCurrentDevice(), tileHeight, tileWidth, tileBlocks); // tileBlocks = 128; int numHeightTiles = utils::divUp(height, tileHeight); int numWidthTiles = utils::divUp(width, tileWidth); int numBlockTiles = utils::divUp((nblocks_total), tileBlocks); // printf("(tileHeight,tileWidth,tileBlocks): (%d,%d,%d)\n", // tileHeight,tileWidth,tileBlocks); // We can have any number of vectors to query against, even less than k, in // which case we'll return -1 for the index FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); // select limitation // // Temporary memory space to *execute* a single batch // DeviceTensor<float, 3, true> distanceBuf_1(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_2(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_3(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_4(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_5(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_6(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_7(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_8(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_9(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_10(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_11(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_12(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_13(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_14(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_15(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true> distanceBuf_16(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, tileBlocks}); DeviceTensor<float, 3, true>* distanceBufs[16] = {&distanceBuf_1, &distanceBuf_2, &distanceBuf_3, &distanceBuf_4, &distanceBuf_5, &distanceBuf_6, &distanceBuf_7, &distanceBuf_8, &distanceBuf_9, &distanceBuf_10, &distanceBuf_11, &distanceBuf_12, &distanceBuf_13, &distanceBuf_14, &distanceBuf_15, &distanceBuf_16}; // std::vector<DeviceTensor<float, 3, true>> distanceBufs(nstreams, // DeviceTensor<float, 3, true>(res, // makeTempAlloc(AllocType::Other, stream), // {tileHeight, tileWidth, tileBlocks})); // DeviceTensor<float, 3, true>** distanceBufs = new DeviceTensor<float, 
3, true>*[nstreams]; // std::vector<DeviceTensor<float, 3, true>> distanceBufs; // distanceBufs.resize(nstreams); // #pragma unroll // for (int i = 0; i < nstreams; ++i){ // auto distBuf_i = new DeviceTensor<float, 3, true>(res, // makeTempAlloc(AllocType::Other, stream), // {tileHeight, tileWidth, tileBlocks}); // distanceBufs[i] = distBuf_i; // // distanceBufs.push_back(distBuf_i); // } // for (int i = 0; i < nstreams; ++i){ // for (int j = 0; j < distanceBufs[i].NumDim; ++j){ // printf("[%d]: getSize(%d): %d\n",i,j,distanceBufs[i].getSize(j)); // } // //isContiguous // } DeviceTensor<int, 3, true> indexingBuf_1(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_2(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_3(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_4(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_5(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_6(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_7(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_8(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_9(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_10(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_11(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_12(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_13(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_14(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_15(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true> indexingBuf_16(res, makeTempAlloc(AllocType::Other, stream), {tileHeight, tileWidth, k}); DeviceTensor<int, 3, true>* indexingBufs[16] = {&indexingBuf_1, &indexingBuf_2, &indexingBuf_3, &indexingBuf_4, &indexingBuf_5, &indexingBuf_6, &indexingBuf_7, &indexingBuf_8, &indexingBuf_9, &indexingBuf_10, &indexingBuf_11, &indexingBuf_12, &indexingBuf_13, &indexingBuf_14, &indexingBuf_15, &indexingBuf_16,}; // // Temporary memory space to *ave* a single batch of images // int tileHeightPad = tileHeight + 2*psHalf; int tileWidthPad = tileWidth + 2*psHalf; DeviceTensor<T, 4, true> aveBuf_1(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_2(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_3(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_4(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_5(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, 
tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_6(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_7(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_8(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_9(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_10(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_11(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_12(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_13(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_14(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_15(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true> aveBuf_16(res, makeTempAlloc(AllocType::Other, stream), {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); DeviceTensor<T, 4, true>* aveBufs[16]; aveBufs[0] = &aveBuf_1; aveBufs[1] = &aveBuf_2; aveBufs[2] = &aveBuf_3; aveBufs[3] = &aveBuf_4; aveBufs[4] = &aveBuf_5; aveBufs[5] = &aveBuf_6; aveBufs[6] = &aveBuf_7; aveBufs[7] = &aveBuf_8; aveBufs[8] = &aveBuf_9; aveBufs[9] = &aveBuf_10; aveBufs[10] = &aveBuf_11; aveBufs[11] = &aveBuf_12; aveBufs[12] = &aveBuf_13; aveBufs[13] = &aveBuf_14; aveBufs[14] = &aveBuf_15; aveBufs[15] = &aveBuf_16; // DeviceTensor<T, 4, true>* aveBufs[2]; // std::vector<DeviceTensor<T, 4, true>> aveBufsVec; // aveBufsVec.reserve(2); // #pragma unroll // for (int i = 0; i < 2; ++i){ // DeviceTensor<T, 4, true> aveBuf_i(res, // makeTempAlloc(AllocType::Other, stream), // {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); // aveBufs.push_back(aveBuf_i); // aveBufsVec.push_back(DeviceTensor<T, 4, true>(res, // makeTempAlloc(AllocType::Other, stream), // {nftrs, tileBlocks, tileHeightPad, tileWidthPad})); // aveBufs[i] = &aveBuf_i; // aveBufs[i] = new DeviceTensor<T, 4, true> (res, // makeTempAlloc(AllocType::Other, stream), // {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); // } // DeviceTensor<T, 4, true>* aveBufs[nstreams] = {}; // for (int i = 0; i < nstreams; ++i){ // auto aveBuf_i = DeviceTensor<T, 4, true>(res, // makeTempAlloc(AllocType::Other, stream), // {nftrs, tileBlocks, tileHeightPad, tileWidthPad}); // res->syncDefaultStreamCurrentDevice(); // aveBufs[i] = &aveBuf_i; // } // Streams allow for concurrent kernel execs. 
auto streams = res->getAlternateStreamsCurrentDevice(); streamWait(streams, {stream}); int curStream = 0; bool interrupt = false; // Tile HEIGHT pixels for (int i = 0; i < height; i += tileHeight) { if (interrupt || InterruptCallback::is_interrupted()) { interrupt = true; break; } // create indices for height tiling int curHeightSize = std::min(tileHeight, height - i); int paddedWidthSize = tileWidth + 2*(pad); int paddedHeightSize = tileHeight + 2*(pad); paddedHeightSize = std::min(paddedHeightSize,heightPad - i); // create views from height tile auto outDistanceHeightView = outDistances.narrow(0, i, curHeightSize); auto outIndexHeightView = outIndices.narrow(1, i, curHeightSize); auto burstHeightView = burst.narrow(2, i, paddedHeightSize); // Tile WIDTH pixels for (int j = 0; j < width; j += tileWidth) { if (InterruptCallback::is_interrupted()) { interrupt = true; break; } // create indices for height tiling int curWidthSize = std::min(tileWidth, width - j); int paddedWidthSize = tileWidth + 2*(pad); paddedWidthSize = std::min(paddedWidthSize,widthPad - j); // view from width tiling auto outDistanceView = outDistanceHeightView.narrow(1, j, curWidthSize); auto outIndexView = outIndexHeightView.narrow(2, j, curWidthSize); auto burstView = burstHeightView.narrow(3, j, paddedWidthSize); for (int blk = 0; blk < nblocks_total; blk += tileBlocks) { if (InterruptCallback::is_interrupted()) { interrupt = true; break; } // // View for Buffers // auto curBlockSize = std::min(tileBlocks, nblocks_total - blk); // printf("(curHeightSize,curWidthSize,curBlockSize): (%d,%d,%d)\n", // curHeightSize,curWidthSize,curBlockSize); auto aveView = aveBufs[curStream] ->narrow(1, 0, curBlockSize) .narrow(2, 0, curHeightSize+2*psHalf) .narrow(3, 0, curWidthSize+2*psHalf); auto distanceBufView = distanceBufs[curStream] ->narrow(0, 0, curHeightSize) .narrow(1, 0, curWidthSize) .narrow(2, 0, curBlockSize); // // View for Blocks // auto blockLabelView = blockLabels.narrow(1, blk, curBlockSize); // // Compute Average // runBurstAverage(burstView,blockLabelView, aveView,patchsize,nblocks, streams[curStream]); // thrust::fill(thrust::cuda::par.on(stream), // aveView.data(), // aveView.end(), // 0.); // // Execute Template Search // runBurstNnfL2Norm(burstView,aveView, blockLabelView, distanceBufView, // outDistanceView, patchsize,nblocks,true, streams[curStream]); // // Top K Selection // // select "topK" from "curBlockSize" of outDistances // this "topK" selection is limited to a "curBlockSize" batch // runBurstNnfSimpleBlockSelect(distanceBufView, blockLabelView, outDistanceView, outIndexView, valMean, false,k,streams[curStream]); // auto indexingBuf = indexingBufs[curStream] // ->narrow(0,0,curHeightSize) // .narrow(1,0,curWidthSize); // runBurstBlockSelect(distanceBufView, // // blockLabelView, // outDistanceView, // indexingBuf, // //outIndexView, // // valMean, // false,k,streams[curStream]); // runBlockIndices2Labels(indexingBuf, // outIndexView, // blockLabelView, // streams[curStream]); } // batching over blockTiles // // Top K Selection: Compare across Inputs & Outputs (e.g. 
"Pairs") // // runBurstBlockSelectPairs(distanceBufView, // // blockLabelView, // outDistanceView, // indexingBuf, // //outIndexView, // // valMean, // false,k,streams[curStream]); // // Convert topK "BlockLabel INDICES" to "BlockLabel VALS" // // convertLocs2Blocks(indexBuf curStream = (curStream + 1) % nstreams; } // batching over widthTiles } // batching over heightTiles // Have the desired ordering stream wait on the multi-stream streamWait({stream}, streams); if (interrupt) { FAISS_THROW_MSG("interrupted"); } } void runBurstPatchDistance( GpuResources* res, cudaStream_t stream, Tensor<float, 4, true>& burst, Tensor<int, 3, true>& blockLabels, int k, int t, int h, int w, int c, int patchsize, int nblocks, float valMean, Tensor<float, 3, true>& outDistances, Tensor<int, 5, true>& outIndices, bool computeL2){ runBurstPatchDistance<float>( res, stream, burst, blockLabels, k,t,h,w,c, patchsize, nblocks, valMean, outDistances, outIndices, computeL2); } void runBurstPatchDistance( GpuResources* res, cudaStream_t stream, Tensor<half, 4, true>& burst, Tensor<int, 3, true>& blockLabels, int k, int t, int h, int w, int c, int patchsize, int nblocks, float valMean, Tensor<float, 3, true>& outDistances, Tensor<int, 5, true>& outIndices, bool computeL2){ runBurstPatchDistance<half>( res, stream, burst, blockLabels, k,t,h,w,c, patchsize, nblocks, valMean, outDistances, outIndices, computeL2); } } // end namespace gpu } // end namespace faiss
98d3db1017847abe7e5f229f0d9853c5091cec01.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <transform.h>

__device__ float op(float d1,float *params) {
    return -d1;
}

extern "C"
__global__ void neg_strided_float(int n,int idx,float *dy,int incy,float *params,float *result) {
    transform(n,idx,dy,incy,params,result);
}
98d3db1017847abe7e5f229f0d9853c5091cec01.cu
#include <transform.h>

__device__ float op(float d1,float *params) {
    return -d1;
}

extern "C"
__global__ void neg_strided_float(int n,int idx,float *dy,int incy,float *params,float *result) {
    transform(n,idx,dy,incy,params,result);
}
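For reading the launch sites in the longer pairs above and below: hipify leaves kernel definitions such as neg_strided_float untouched apart from the prepended header, but rewrites CUDA's triple-chevron launches into hipLaunchKernelGGL, whose leading arguments are the kernel, the grid and block dim3s, the dynamic shared-memory size, and the stream, followed by the kernel arguments. A minimal sketch of that correspondence, with a hypothetical kernel and launch parameters:

#include <cuda_runtime.h>

__global__ void scaleBy2(float* buf, int n) {      // stand-in kernel, not from the corpus
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) buf[i] *= 2.0f;
}

void launchExample(float* d_buf, int n, cudaStream_t stream) {
    dim3 grid((n + 255) / 256), block(256);
    // CUDA form, as it appears in the *.cu files:
    scaleBy2<<<grid, block, 0, stream>>>(d_buf, n);
    // HIP form emitted by hipify in the *.hip files (needs hip/hip_runtime.h):
    //   hipLaunchKernelGGL(scaleBy2, grid, block, 0, stream, d_buf, n);
}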
a3bd9613ce20be4dd1a0c008084472fc30d3018a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> //#include <cstdlib.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> using namespace std; __global__ void vectorAdd(float *g_odata, float *g_idata, int n) { /* extern __shared__ float temp[]; //allocated on invocation int thid = threadIdx.x; int offset = 1; temp[2*thid] = g_idata[2*thid]; // load input into shared memory temp[2*thid+1] = g_idata[2*thid+1]; for (int d = n>>1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; temp[bi] += temp[ai]; } offset *= 2; } if (thid==0) { temp[n-1] = 0;} // clear the last element for (int d = 1; d < n; d*= 2) // traverse down tree & build scan { offset >>=1; __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; float t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[2*thid] = temp[2*thid]; //write results to device memory g_odata[2*thid+1] = temp[2*thid+1]; */ } int main(void) { hipError_t err = hipSuccess; int numElements = 15; size_t size = numElements * sizeof(float); //Allocate the host input vector A float *h_A = (float *)malloc(size); //Verify that allocations succeeded if (h_A == NULL) { fprintf(stderr, "Failed to allocate host vector A!\n"); exit(EXIT_FAILURE); } //Allocate the host input vector B float *h_B = (float *)malloc(size); //Verify that allocations succeeded if (h_B == NULL) { fprintf(stderr, "Failed to allocate host vector B!\n"); exit(EXIT_FAILURE); } //Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand() % 101; // fprintf("%s \n",h_A[i]); } // Allocate the deivce input vector A float *d_A = NULL; err = hipMalloc((void **)&d_A, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector A!\n"); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = hipMalloc((void **)&d_B, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector B!\n"); exit(EXIT_FAILURE); } //Copy the host input vectors A in host memory to the device input vectors // in device memory printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from host to device!\n"); exit(EXIT_FAILURE); } printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from host to device!\n"); exit(EXIT_FAILURE); } // Launch the vector add cuda kernel int threadsPerBlock = 256; int blocksPerGrid = (numElements + threadsPerBlock - 1)/threadsPerBlock; hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, numElements); //Copy the device result vector in device memory to the host result vector // in host memory err = hipMemcpy(h_B, d_B, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { printf("%i",err); fprintf(stderr, "Failed to copy vector B from device to host!\n"); exit(EXIT_FAILURE); } for (int i = 0; i < numElements; i++) { // cout<<h_B[i]<<'\n'; } // Free device global memory err = hipFree(d_A); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector A!\n"); exit(EXIT_FAILURE); } err = hipFree(d_B); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector B!\n"); exit(EXIT_FAILURE); 
} //free host memory free(h_A); free(h_B); // reset device and exit err = hipDeviceReset(); if (err != hipSuccess) { fprintf(stderr, "Failed to deinitialize the device!\n"); exit(EXIT_FAILURE); } return 0; }
a3bd9613ce20be4dd1a0c008084472fc30d3018a.cu
#include <stdio.h> //#include <cstdlib.h> #include <stdlib.h> #include <time.h> #include <cuda_runtime.h> using namespace std; __global__ void vectorAdd(float *g_odata, float *g_idata, int n) { /* extern __shared__ float temp[]; //allocated on invocation int thid = threadIdx.x; int offset = 1; temp[2*thid] = g_idata[2*thid]; // load input into shared memory temp[2*thid+1] = g_idata[2*thid+1]; for (int d = n>>1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; temp[bi] += temp[ai]; } offset *= 2; } if (thid==0) { temp[n-1] = 0;} // clear the last element for (int d = 1; d < n; d*= 2) // traverse down tree & build scan { offset >>=1; __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; float t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[2*thid] = temp[2*thid]; //write results to device memory g_odata[2*thid+1] = temp[2*thid+1]; */ } int main(void) { cudaError_t err = cudaSuccess; int numElements = 15; size_t size = numElements * sizeof(float); //Allocate the host input vector A float *h_A = (float *)malloc(size); //Verify that allocations succeeded if (h_A == NULL) { fprintf(stderr, "Failed to allocate host vector A!\n"); exit(EXIT_FAILURE); } //Allocate the host input vector B float *h_B = (float *)malloc(size); //Verify that allocations succeeded if (h_B == NULL) { fprintf(stderr, "Failed to allocate host vector B!\n"); exit(EXIT_FAILURE); } //Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand() % 101; // fprintf("%s \n",h_A[i]); } // Allocate the deivce input vector A float *d_A = NULL; err = cudaMalloc((void **)&d_A, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector A!\n"); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = cudaMalloc((void **)&d_B, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector B!\n"); exit(EXIT_FAILURE); } //Copy the host input vectors A in host memory to the device input vectors // in device memory printf("Copy input data from the host memory to the CUDA device\n"); err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector A from host to device!\n"); exit(EXIT_FAILURE); } printf("Copy input data from the host memory to the CUDA device\n"); err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector A from host to device!\n"); exit(EXIT_FAILURE); } // Launch the vector add cuda kernel int threadsPerBlock = 256; int blocksPerGrid = (numElements + threadsPerBlock - 1)/threadsPerBlock; vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, numElements); //Copy the device result vector in device memory to the host result vector // in host memory err = cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { printf("%i",err); fprintf(stderr, "Failed to copy vector B from device to host!\n"); exit(EXIT_FAILURE); } for (int i = 0; i < numElements; i++) { // cout<<h_B[i]<<'\n'; } // Free device global memory err = cudaFree(d_A); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector A!\n"); exit(EXIT_FAILURE); } err = cudaFree(d_B); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector B!\n"); exit(EXIT_FAILURE); } //free host memory free(h_A); free(h_B); // reset device and exit err = 
cudaDeviceReset(); if (err != cudaSuccess) { fprintf(stderr, "Failed to deinitialize the device!\n"); exit(EXIT_FAILURE); } return 0; }
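The body of the vectorAdd kernel in this pair is entirely commented out; what is commented out is not an element-wise add but the classic single-block work-efficient (Blelloch) exclusive scan, in which each thread loads temp[2*thid] and temp[2*thid+1], so it needs n/2 threads and n floats of dynamic shared memory for a power-of-two n. If the body were re-enabled, a launch consistent with that indexing would look like the sketch below (the padded size n, the helper name, and the assumption that the buffers hold n elements are all hypothetical):

// Hypothetical launch for the commented-out scan body: one block, n/2 threads,
// n floats of dynamic shared memory; d_in/d_out must be allocated with n (padded) elements.
void launchPrescan(float* d_out, float* d_in, int numElements) {
    int n = 1;
    while (n < numElements) n <<= 1;              // the in-place tree walk assumes a power of two
    vectorAdd<<<1, n / 2, n * sizeof(float)>>>(d_out, d_in, n);
    cudaDeviceSynchronize();
}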
827a02c65a7e58e32079053588518e7133786c76.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. 
*/ #include <cstdio> #include <cstdlib> #include <vector> #include <rocblas.h> #include <hip/hip_runtime.h> #include "cublas_utils.h" using data_type = double; int main(int argc, char *argv[]) { hipblasHandle_t cublasH = NULL; hipStream_t stream = NULL; /* * A = 2.10 * B = 1.20 */ data_type A = 2.1; data_type B = 1.2; data_type c = 2.1; data_type s = 1.2; printf("A\n"); std::printf("%0.2f\n", A); printf("=====\n"); printf("B\n"); std::printf("%0.2f\n", B); printf("=====\n"); /* step 1: create cublas handle, bind a stream */ CUBLAS_CHECK(hipblasCreate(&cublasH)); CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); CUBLAS_CHECK(hipblasSetStream(cublasH, stream)); /* step 3: compute */ CUBLAS_CHECK(hipblasDrotg(cublasH, &A, &B, &c, &s)); CUDA_CHECK(hipStreamSynchronize(stream)); /* * A = 2.42 * B = 0.50 */ printf("A\n"); std::printf("%0.2f\n", A); printf("=====\n"); printf("B\n"); std::printf("%0.2f\n", B); printf("=====\n"); /* free resources */ CUBLAS_CHECK(hipblasDestroy(cublasH)); CUDA_CHECK(hipStreamDestroy(stream)); CUDA_CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
827a02c65a7e58e32079053588518e7133786c76.cu
/* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. */ #include <cstdio> #include <cstdlib> #include <vector> #include <cublas_v2.h> #include <cuda_runtime.h> #include "cublas_utils.h" using data_type = double; int main(int argc, char *argv[]) { cublasHandle_t cublasH = NULL; cudaStream_t stream = NULL; /* * A = 2.10 * B = 1.20 */ data_type A = 2.1; data_type B = 1.2; data_type c = 2.1; data_type s = 1.2; printf("A\n"); std::printf("%0.2f\n", A); printf("=====\n"); printf("B\n"); std::printf("%0.2f\n", B); printf("=====\n"); /* step 1: create cublas handle, bind a stream */ CUBLAS_CHECK(cublasCreate(&cublasH)); CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); CUBLAS_CHECK(cublasSetStream(cublasH, stream)); /* step 3: compute */ CUBLAS_CHECK(cublasDrotg(cublasH, &A, &B, &c, &s)); CUDA_CHECK(cudaStreamSynchronize(stream)); /* * A = 2.42 * B = 0.50 */ printf("A\n"); std::printf("%0.2f\n", A); printf("=====\n"); printf("B\n"); std::printf("%0.2f\n", B); printf("=====\n"); /* free resources */ CUBLAS_CHECK(cublasDestroy(cublasH)); CUDA_CHECK(cudaStreamDestroy(stream)); CUDA_CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
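The expected values in the comments of this rotg sample (A = 2.42, B = 0.50) follow from the Givens-rotation construction that BLAS rotg performs: for inputs a and b it computes r = sqrt(a^2 + b^2), c = a/r, s = b/r, overwrites a with r, and (under the usual convention, when |a| > |b|) overwrites b with z = s. A small host-only check of those numbers, independent of cuBLAS:

#include <cmath>
#include <cstdio>

int main() {
    double a = 2.1, b = 1.2;
    double r = std::sqrt(a * a + b * b);                 // 2.4187... -> prints as 2.42
    double c = a / r;                                    // cosine of the rotation, ~0.87
    double s = b / r;                                    // sine of the rotation, 0.4961... -> 0.50
    double z = (std::fabs(a) > std::fabs(b)) ? s         // value rotg stores back into b
                                             : (c != 0.0 ? 1.0 / c : 1.0);
    std::printf("r=%0.2f c=%0.2f s=%0.2f z=%0.2f\n", r, c, s, z);
    return 0;
}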
eab0f073a39bc5c171c9fc12940b217ef4522c33.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/cu_inc/elementwise_impl.cuh" namespace onnxruntime { namespace cuda { template <typename T, typename TLabel, typename TOut, bool IsWeighted, typename TIndex> struct OpSoftmaxCrossEntropyWeights { OpSoftmaxCrossEntropyWeights(const TLabel* label_data, const T* weight_data, TLabel C, TLabel ignore_index) : label_data_(label_data), weight_data_(weight_data), C_(C), ignore_index_(ignore_index) {} __device__ __inline__ TOut operator()(TIndex idx) const { if (label_data_[idx] != ignore_index_) { if (IsWeighted) { CUDA_KERNEL_ASSERT(label_data_[idx] >= 0 && label_data_[idx] < C_); return TOut(weight_data_[label_data_[idx]]); } return TOut(1.f); } return TOut(0.f); } const TLabel* label_data_; const T* weight_data_; TLabel C_; TLabel ignore_index_; }; template <typename T, typename TLabel, typename TOut> void ComputeSoftmaxCrossEntropyWeightsImpl(hipStream_t stream, const TLabel* label, const T* weight, size_t count, size_t label_depth, int64_t ignore_index, TOut* weight_data_nd) { if (weight) { typedef OpSoftmaxCrossEntropyWeights<T, TLabel, TOut, true, size_t> OP_Type; OP_Type op(label, weight, static_cast<TLabel>(label_depth), static_cast<TLabel>(ignore_index)); LaunchElementwiseKernel<TOut, OP_Type, size_t>(stream, weight_data_nd, op, count); } else { typedef OpSoftmaxCrossEntropyWeights<T, TLabel, TOut, false, size_t> OP_Type; OP_Type op(label, nullptr, static_cast<TLabel>(label_depth), static_cast<TLabel>(ignore_index)); LaunchElementwiseKernel<TOut, OP_Type, size_t>(stream, weight_data_nd, op, count); } } #define INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(T, TLabel, TOut) \ template void ComputeSoftmaxCrossEntropyWeightsImpl(hipStream_t stream, const TLabel* label, const T* weight, \ size_t count, size_t label_depth, int64_t ignore_index, \ TOut* weight_data_nd) INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(float, int32_t, float); INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(float, int64_t, float); INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(half, int32_t, float); INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(half, int64_t, float); INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(half, int64_t, half); INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(BFloat16, int64_t, BFloat16); #undef INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL template <typename T, typename TAcc, typename TLabel, typename TIndex> struct OpWeightedSoftmaxCrossEntropyLoss { OpWeightedSoftmaxCrossEntropyLoss(const T* log_prob_data, const TLabel* label_data, const T* weight_data, const TAcc* normalize_factor_data, TLabel C, TLabel ignore_index) : log_prob_data_(log_prob_data), label_data_(label_data), weight_data_(weight_data), normalize_factor_data_(normalize_factor_data), C_(C), ignore_index_(ignore_index) {} __device__ __inline__ T operator()(TIndex idx) const { if (label_data_[idx] != ignore_index_) { CUDA_KERNEL_ASSERT(label_data_[idx] >= 0 && label_data_[idx] < C_); T ret = static_cast<T>(static_cast<TAcc>(-log_prob_data_[idx * C_ + label_data_[idx]] * weight_data_[idx]) / (*normalize_factor_data_)); return ret; } return T(0.f); } const T* log_prob_data_; const TLabel* label_data_; const T* weight_data_; const TAcc* normalize_factor_data_; TLabel C_; TLabel ignore_index_; }; template <typename T, typename TAcc, typename TLabel> void SoftmaxCrossEntropyLossImpl(hipStream_t stream, const T* log_prob, const TLabel* label, const T* weight, const TAcc* 
normalize_factor, size_t count, size_t label_depth, int64_t ignore_index, T* output_data) { typedef OpWeightedSoftmaxCrossEntropyLoss<T, TAcc, TLabel, size_t> OP_Type; OP_Type op(log_prob, label, weight, normalize_factor, static_cast<TLabel>(label_depth), static_cast<TLabel>(ignore_index)); LaunchElementwiseKernel<T, OP_Type, size_t>(stream, output_data, op, count); } template <typename T, typename TAcc, typename TLabel, typename TOut, bool IsReductionNone, bool HasBias, typename TIndex> struct OpWeightedSoftmaxCrossEntropyLossGrad { OpWeightedSoftmaxCrossEntropyLossGrad(const T* dY_data, const T* log_prob_data, const TLabel* label_data, const T* weight_data, const TAcc* normalize_factor_data, const TOut* bias_data, TLabel C) : dY_data_(dY_data), log_prob_data_(log_prob_data), label_data_(label_data), weight_data_(weight_data), normalize_factor_data_(normalize_factor_data), bias_data_(bias_data), C_(C) { C_fdm_ = DivMod(static_cast<TIndex>(C)); } __device__ __inline__ TOut operator()(TIndex idx) const { // normalize_factor is sum of labels' weights. Because zero sum implies all weights are 0, the loss function should // be constant 0 and its corresponding gradient should be 0 as well. TAcc result = TAcc(0.f); if (*normalize_factor_data_ != TAcc(0.f)) { TIndex row, d; C_fdm_.divmod(idx, row, d); CUDA_KERNEL_ASSERT(weight_data_[row] == T(0.f) || (label_data_[row] >= 0 && label_data_[row] < C_)); result = static_cast<TAcc>((IsReductionNone ? dY_data_[row] : *dY_data_) * weight_data_[row]) * (_Exp(static_cast<TAcc>(log_prob_data_[idx])) - (TAcc)(d == label_data_[row])) / (*normalize_factor_data_); } return HasBias ? static_cast<TOut>(result + static_cast<TAcc>(bias_data_[idx])) : static_cast<TOut>(result); } const T* dY_data_; const T* log_prob_data_; const TLabel* label_data_; const T* weight_data_; const TAcc* normalize_factor_data_; const TOut* bias_data_; TLabel C_; DivMod<TIndex> C_fdm_; }; template <typename T, typename TAcc, typename TLabel, typename TOut> void SoftmaxCrossEntropyLossGradImpl(hipStream_t stream, const T* dY, const T* log_prob, const TLabel* label, const T* weight, const TAcc* normalize_factor, const TOut* bias_data, size_t count, size_t label_depth, bool reduction_none, TOut* output_data) { #define LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(is_reduction_none, has_bias) \ uint64_t total_count = count * label_depth; \ if (total_count <= static_cast<uint64_t>(std::numeric_limits<int>::max())) { \ typedef OpWeightedSoftmaxCrossEntropyLossGrad<T, TAcc, TLabel, TOut, is_reduction_none, has_bias, int> OP_Type; \ OP_Type op(dY, log_prob, label, weight, normalize_factor, bias_data, static_cast<TLabel>(label_depth)); \ LaunchElementwiseKernel<TOut, OP_Type, int>(stream, output_data, op, static_cast<int>(total_count)); \ } else { \ typedef OpWeightedSoftmaxCrossEntropyLossGrad<T, TAcc, TLabel, TOut, is_reduction_none, has_bias, uint64_t> OP_Type; \ OP_Type op(dY, log_prob, label, weight, normalize_factor, bias_data, static_cast<TLabel>(label_depth)); \ LaunchElementwiseKernel<TOut, decltype(op), uint64_t>(stream, output_data, op, total_count); \ } if (reduction_none) { if (bias_data) { LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(true, true); } else { LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(true, false); } } else { if (bias_data) { LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(false, true); } else { LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(false, false); } } #undef LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL } 
#define INSTANTIATE_SCE_LOSS_IMPL(T, TAcc, TLabel) \ template void SoftmaxCrossEntropyLossImpl(hipStream_t stream, const T* log_prob, const TLabel* label, const T* weight, \ const TAcc* normalize_factor, size_t count, size_t label_depth, \ int64_t ignore_index, T* output_data); INSTANTIATE_SCE_LOSS_IMPL(float, float, int32_t); INSTANTIATE_SCE_LOSS_IMPL(float, float, int64_t); INSTANTIATE_SCE_LOSS_IMPL(half, float, int64_t); INSTANTIATE_SCE_LOSS_IMPL(BFloat16, float, int64_t); #undef INSTANTIATE_SCE_LOSS_IMPL #define INSTANTIATE_SCE_LOSS_GRAD_IMPL(T, TAcc, TLabel, TOut) \ template void SoftmaxCrossEntropyLossGradImpl(hipStream_t stream, const T* dY, const T* log_prob, const TLabel* label, \ const T* weight, const TAcc* normalize_factor, const TOut* bias_data, \ size_t count, size_t label_depth, bool reducation_none, \ TOut* output_data) INSTANTIATE_SCE_LOSS_GRAD_IMPL(float, float, int32_t, float); INSTANTIATE_SCE_LOSS_GRAD_IMPL(float, float, int32_t, half); INSTANTIATE_SCE_LOSS_GRAD_IMPL(float, float, int64_t, float); INSTANTIATE_SCE_LOSS_GRAD_IMPL(float, float, int64_t, half); INSTANTIATE_SCE_LOSS_GRAD_IMPL(half, float, int64_t, half); INSTANTIATE_SCE_LOSS_GRAD_IMPL(BFloat16, float, int64_t, BFloat16); #undef INSTANTIATE_SCE_LOSS_GRAD_IMPL } // namespace cuda } // namespace onnxruntime
eab0f073a39bc5c171c9fc12940b217ef4522c33.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/cu_inc/elementwise_impl.cuh" namespace onnxruntime { namespace cuda { template <typename T, typename TLabel, typename TOut, bool IsWeighted, typename TIndex> struct OpSoftmaxCrossEntropyWeights { OpSoftmaxCrossEntropyWeights(const TLabel* label_data, const T* weight_data, TLabel C, TLabel ignore_index) : label_data_(label_data), weight_data_(weight_data), C_(C), ignore_index_(ignore_index) {} __device__ __inline__ TOut operator()(TIndex idx) const { if (label_data_[idx] != ignore_index_) { if (IsWeighted) { CUDA_KERNEL_ASSERT(label_data_[idx] >= 0 && label_data_[idx] < C_); return TOut(weight_data_[label_data_[idx]]); } return TOut(1.f); } return TOut(0.f); } const TLabel* label_data_; const T* weight_data_; TLabel C_; TLabel ignore_index_; }; template <typename T, typename TLabel, typename TOut> void ComputeSoftmaxCrossEntropyWeightsImpl(cudaStream_t stream, const TLabel* label, const T* weight, size_t count, size_t label_depth, int64_t ignore_index, TOut* weight_data_nd) { if (weight) { typedef OpSoftmaxCrossEntropyWeights<T, TLabel, TOut, true, size_t> OP_Type; OP_Type op(label, weight, static_cast<TLabel>(label_depth), static_cast<TLabel>(ignore_index)); LaunchElementwiseKernel<TOut, OP_Type, size_t>(stream, weight_data_nd, op, count); } else { typedef OpSoftmaxCrossEntropyWeights<T, TLabel, TOut, false, size_t> OP_Type; OP_Type op(label, nullptr, static_cast<TLabel>(label_depth), static_cast<TLabel>(ignore_index)); LaunchElementwiseKernel<TOut, OP_Type, size_t>(stream, weight_data_nd, op, count); } } #define INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(T, TLabel, TOut) \ template void ComputeSoftmaxCrossEntropyWeightsImpl(cudaStream_t stream, const TLabel* label, const T* weight, \ size_t count, size_t label_depth, int64_t ignore_index, \ TOut* weight_data_nd) INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(float, int32_t, float); INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(float, int64_t, float); INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(half, int32_t, float); INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(half, int64_t, float); INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(half, int64_t, half); INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(BFloat16, int64_t, BFloat16); #undef INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL template <typename T, typename TAcc, typename TLabel, typename TIndex> struct OpWeightedSoftmaxCrossEntropyLoss { OpWeightedSoftmaxCrossEntropyLoss(const T* log_prob_data, const TLabel* label_data, const T* weight_data, const TAcc* normalize_factor_data, TLabel C, TLabel ignore_index) : log_prob_data_(log_prob_data), label_data_(label_data), weight_data_(weight_data), normalize_factor_data_(normalize_factor_data), C_(C), ignore_index_(ignore_index) {} __device__ __inline__ T operator()(TIndex idx) const { if (label_data_[idx] != ignore_index_) { CUDA_KERNEL_ASSERT(label_data_[idx] >= 0 && label_data_[idx] < C_); T ret = static_cast<T>(static_cast<TAcc>(-log_prob_data_[idx * C_ + label_data_[idx]] * weight_data_[idx]) / (*normalize_factor_data_)); return ret; } return T(0.f); } const T* log_prob_data_; const TLabel* label_data_; const T* weight_data_; const TAcc* normalize_factor_data_; TLabel C_; TLabel ignore_index_; }; template <typename T, typename TAcc, typename TLabel> void SoftmaxCrossEntropyLossImpl(cudaStream_t stream, const T* log_prob, const TLabel* label, const T* weight, const TAcc* normalize_factor, size_t count, size_t label_depth, int64_t 
ignore_index, T* output_data) { typedef OpWeightedSoftmaxCrossEntropyLoss<T, TAcc, TLabel, size_t> OP_Type; OP_Type op(log_prob, label, weight, normalize_factor, static_cast<TLabel>(label_depth), static_cast<TLabel>(ignore_index)); LaunchElementwiseKernel<T, OP_Type, size_t>(stream, output_data, op, count); } template <typename T, typename TAcc, typename TLabel, typename TOut, bool IsReductionNone, bool HasBias, typename TIndex> struct OpWeightedSoftmaxCrossEntropyLossGrad { OpWeightedSoftmaxCrossEntropyLossGrad(const T* dY_data, const T* log_prob_data, const TLabel* label_data, const T* weight_data, const TAcc* normalize_factor_data, const TOut* bias_data, TLabel C) : dY_data_(dY_data), log_prob_data_(log_prob_data), label_data_(label_data), weight_data_(weight_data), normalize_factor_data_(normalize_factor_data), bias_data_(bias_data), C_(C) { C_fdm_ = DivMod(static_cast<TIndex>(C)); } __device__ __inline__ TOut operator()(TIndex idx) const { // normalize_factor is sum of labels' weights. Because zero sum implies all weights are 0, the loss function should // be constant 0 and its corresponding gradient should be 0 as well. TAcc result = TAcc(0.f); if (*normalize_factor_data_ != TAcc(0.f)) { TIndex row, d; C_fdm_.divmod(idx, row, d); CUDA_KERNEL_ASSERT(weight_data_[row] == T(0.f) || (label_data_[row] >= 0 && label_data_[row] < C_)); result = static_cast<TAcc>((IsReductionNone ? dY_data_[row] : *dY_data_) * weight_data_[row]) * (_Exp(static_cast<TAcc>(log_prob_data_[idx])) - (TAcc)(d == label_data_[row])) / (*normalize_factor_data_); } return HasBias ? static_cast<TOut>(result + static_cast<TAcc>(bias_data_[idx])) : static_cast<TOut>(result); } const T* dY_data_; const T* log_prob_data_; const TLabel* label_data_; const T* weight_data_; const TAcc* normalize_factor_data_; const TOut* bias_data_; TLabel C_; DivMod<TIndex> C_fdm_; }; template <typename T, typename TAcc, typename TLabel, typename TOut> void SoftmaxCrossEntropyLossGradImpl(cudaStream_t stream, const T* dY, const T* log_prob, const TLabel* label, const T* weight, const TAcc* normalize_factor, const TOut* bias_data, size_t count, size_t label_depth, bool reduction_none, TOut* output_data) { #define LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(is_reduction_none, has_bias) \ uint64_t total_count = count * label_depth; \ if (total_count <= static_cast<uint64_t>(std::numeric_limits<int>::max())) { \ typedef OpWeightedSoftmaxCrossEntropyLossGrad<T, TAcc, TLabel, TOut, is_reduction_none, has_bias, int> OP_Type; \ OP_Type op(dY, log_prob, label, weight, normalize_factor, bias_data, static_cast<TLabel>(label_depth)); \ LaunchElementwiseKernel<TOut, OP_Type, int>(stream, output_data, op, static_cast<int>(total_count)); \ } else { \ typedef OpWeightedSoftmaxCrossEntropyLossGrad<T, TAcc, TLabel, TOut, is_reduction_none, has_bias, uint64_t> OP_Type; \ OP_Type op(dY, log_prob, label, weight, normalize_factor, bias_data, static_cast<TLabel>(label_depth)); \ LaunchElementwiseKernel<TOut, decltype(op), uint64_t>(stream, output_data, op, total_count); \ } if (reduction_none) { if (bias_data) { LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(true, true); } else { LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(true, false); } } else { if (bias_data) { LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(false, true); } else { LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(false, false); } } #undef LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL } #define INSTANTIATE_SCE_LOSS_IMPL(T, TAcc, TLabel) \ 
template void SoftmaxCrossEntropyLossImpl(cudaStream_t stream, const T* log_prob, const TLabel* label, const T* weight, \ const TAcc* normalize_factor, size_t count, size_t label_depth, \ int64_t ignore_index, T* output_data); INSTANTIATE_SCE_LOSS_IMPL(float, float, int32_t); INSTANTIATE_SCE_LOSS_IMPL(float, float, int64_t); INSTANTIATE_SCE_LOSS_IMPL(half, float, int64_t); INSTANTIATE_SCE_LOSS_IMPL(BFloat16, float, int64_t); #undef INSTANTIATE_SCE_LOSS_IMPL #define INSTANTIATE_SCE_LOSS_GRAD_IMPL(T, TAcc, TLabel, TOut) \ template void SoftmaxCrossEntropyLossGradImpl(cudaStream_t stream, const T* dY, const T* log_prob, const TLabel* label, \ const T* weight, const TAcc* normalize_factor, const TOut* bias_data, \ size_t count, size_t label_depth, bool reducation_none, \ TOut* output_data) INSTANTIATE_SCE_LOSS_GRAD_IMPL(float, float, int32_t, float); INSTANTIATE_SCE_LOSS_GRAD_IMPL(float, float, int32_t, half); INSTANTIATE_SCE_LOSS_GRAD_IMPL(float, float, int64_t, float); INSTANTIATE_SCE_LOSS_GRAD_IMPL(float, float, int64_t, half); INSTANTIATE_SCE_LOSS_GRAD_IMPL(half, float, int64_t, half); INSTANTIATE_SCE_LOSS_GRAD_IMPL(BFloat16, float, int64_t, BFloat16); #undef INSTANTIATE_SCE_LOSS_GRAD_IMPL } // namespace cuda } // namespace onnxruntime
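For reference, the functors in this pair implement weighted softmax cross entropy in its usual form: for sample i with target y_i, the forward contribution is -w_i * log p_{i,y_i} / N, where w_i is the per-sample weight gathered by ComputeSoftmaxCrossEntropyWeightsImpl and N (normalize_factor) is the sum of those weights, and the gradient with respect to logit column d is dY * w_i * (p_{i,d} - [d == y_i]) / N. A tiny host-side mirror of the forward functor for a single sample (the sizes and log-probabilities below are made up for illustration):

#include <cstdio>

int main() {
    const int C = 3;                                // number of classes
    float log_prob[C] = {-0.2f, -1.9f, -2.6f};      // log-softmax of one row of logits
    int label = 0;                                  // target class y_i
    float sample_weight = 1.0f;                     // w_i, as gathered from the class-weight table
    float normalize_factor = 1.0f;                  // N, sum of the gathered weights over the batch

    // Mirrors OpWeightedSoftmaxCrossEntropyLoss::operator() for one idx.
    float loss = -log_prob[label] * sample_weight / normalize_factor;
    std::printf("loss = %f\n", loss);               // 0.200000
    return 0;
}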
8488a1b3ad1e04ec9897ec7ff57b7c33bf86e535.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel_utils.h" #include "constant.h" __global__ void d_exp_reduction(float* matrix, float* exp_matrix, int height, int width, float* result) { extern __shared__ float thread_sums[]; int offset; if (!(height % 32)) offset = blockIdx.x * height + threadIdx.x; else offset = blockIdx.x * ALIGN32(height) + threadIdx.x; float local_sum = 0; for (int i = 0; i < height / blockDim.x + 1 && offset < height; ++i, offset += blockDim.x) local_sum += exp_matrix[offset] = EXP_FUNC(matrix[offset]); thread_sums[threadIdx.x] = local_sum; __syncthreads(); if (blockDim.x > 512 && threadIdx.x < 512) thread_sums[threadIdx.x] = local_sum = local_sum + thread_sums[threadIdx.x + 512]; __syncthreads(); if (blockDim.x > 256 && threadIdx.x < 256) thread_sums[threadIdx.x] = local_sum = local_sum + thread_sums[threadIdx.x + 256]; __syncthreads(); if (blockDim.x > 128 && threadIdx.x < 128) thread_sums[threadIdx.x] = local_sum = local_sum + thread_sums[threadIdx.x + 128]; __syncthreads(); if (blockDim.x > 64 && threadIdx.x < 64) thread_sums[threadIdx.x] = local_sum = local_sum + thread_sums[threadIdx.x + 64]; __syncthreads(); if (blockDim.x > 32 && threadIdx.x < 32) thread_sums[threadIdx.x] = local_sum = local_sum + thread_sums[threadIdx.x + 32]; __syncthreads(); for (int i = warpSize / 2; i > 0; i /= 2) local_sum = __shfl_down_sync(0xFFFFFFFF, local_sum, i); if (threadIdx.x == 0) result[blockIdx.x] = local_sum; } __global__ void d_exp(float* array, int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) array[index] = exp(array[index]); } __global__ void d_softmax(float* matrix, int height, int width, float* exp_sum) { int size = ALIGN32(height) * width; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) matrix[index] /= exp_sum[index / ALIGN32(height)]; } __global__ void d_w2o_update(float* weights, int height, int width, int batch_size, float* exp_vec, float* ouput, float* expected) { int size = ALIGN32(height) * width; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) ; } // cpu call wrappers extern "C" void m_exp_reduction(float* matrix, float* exp_matrix, int height, int width, float* result) { hipLaunchKernelGGL(( d_exp_reduction), dim3(width), dim3(REDUCTION_THREADS), REDUCTION_THREADS * sizeof(float), 0, matrix, exp_matrix, height, width, result); } extern "C" void v_exp(float* array, int size) { hipLaunchKernelGGL(( d_exp), dim3(size / EXP_THREADS + 1), dim3(EXP_THREADS), 0, 0, array, size); } extern "C" void m_softmax(float* matrix, int height, int width, float* exp_sum) { hipLaunchKernelGGL(( d_softmax), dim3((ALIGN32(height) * width) / SOFTMAX_THREADS + 1), dim3(SOFTMAX_THREADS), 0, 0, matrix, height, width, exp_sum); } extern "C" void w2o_update(float* weights, int height, int width, int batch_size, float* exp_vec, float* ouput, float* expected) { hipLaunchKernelGGL(( d_w2o_update), dim3((ALIGN32(height) * width) / UPDATE_L1_THREADS + 1), dim3(UPDATE_L1_THREADS), 0, 0, weights, height, width, batch_size, exp_vec, ouput, expected); }
8488a1b3ad1e04ec9897ec7ff57b7c33bf86e535.cu
#include "kernel_utils.h" #include "constant.h" __global__ void d_exp_reduction(float* matrix, float* exp_matrix, int height, int width, float* result) { extern __shared__ float thread_sums[]; int offset; if (!(height % 32)) offset = blockIdx.x * height + threadIdx.x; else offset = blockIdx.x * ALIGN32(height) + threadIdx.x; float local_sum = 0; for (int i = 0; i < height / blockDim.x + 1 && offset < height; ++i, offset += blockDim.x) local_sum += exp_matrix[offset] = EXP_FUNC(matrix[offset]); thread_sums[threadIdx.x] = local_sum; __syncthreads(); if (blockDim.x > 512 && threadIdx.x < 512) thread_sums[threadIdx.x] = local_sum = local_sum + thread_sums[threadIdx.x + 512]; __syncthreads(); if (blockDim.x > 256 && threadIdx.x < 256) thread_sums[threadIdx.x] = local_sum = local_sum + thread_sums[threadIdx.x + 256]; __syncthreads(); if (blockDim.x > 128 && threadIdx.x < 128) thread_sums[threadIdx.x] = local_sum = local_sum + thread_sums[threadIdx.x + 128]; __syncthreads(); if (blockDim.x > 64 && threadIdx.x < 64) thread_sums[threadIdx.x] = local_sum = local_sum + thread_sums[threadIdx.x + 64]; __syncthreads(); if (blockDim.x > 32 && threadIdx.x < 32) thread_sums[threadIdx.x] = local_sum = local_sum + thread_sums[threadIdx.x + 32]; __syncthreads(); for (int i = warpSize / 2; i > 0; i /= 2) local_sum = __shfl_down_sync(0xFFFFFFFF, local_sum, i); if (threadIdx.x == 0) result[blockIdx.x] = local_sum; } __global__ void d_exp(float* array, int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) array[index] = exp(array[index]); } __global__ void d_softmax(float* matrix, int height, int width, float* exp_sum) { int size = ALIGN32(height) * width; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) matrix[index] /= exp_sum[index / ALIGN32(height)]; } __global__ void d_w2o_update(float* weights, int height, int width, int batch_size, float* exp_vec, float* ouput, float* expected) { int size = ALIGN32(height) * width; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) ; } // cpu call wrappers extern "C" void m_exp_reduction(float* matrix, float* exp_matrix, int height, int width, float* result) { d_exp_reduction<<<width, REDUCTION_THREADS, REDUCTION_THREADS * sizeof(float)>>>(matrix, exp_matrix, height, width, result); } extern "C" void v_exp(float* array, int size) { d_exp<<<size / EXP_THREADS + 1, EXP_THREADS>>>(array, size); } extern "C" void m_softmax(float* matrix, int height, int width, float* exp_sum) { d_softmax<<<(ALIGN32(height) * width) / SOFTMAX_THREADS + 1, SOFTMAX_THREADS>>>(matrix, height, width, exp_sum); } extern "C" void w2o_update(float* weights, int height, int width, int batch_size, float* exp_vec, float* ouput, float* expected) { d_w2o_update<<<(ALIGN32(height) * width) / UPDATE_L1_THREADS + 1, UPDATE_L1_THREADS>>>(weights, height, width, batch_size, exp_vec, ouput, expected); }
ab567cb91133f326dc1d5183b5ffddd0e11dcd0d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zparilut_kernels.cu, normal z -> d, Wed Jan 2 14:18:53 2019 */ #include "magmasparse_internal.h" #define PRECISION_d __global__ void magma_dparilut_L_kernel( const magma_int_t num_rows, const magma_index_t *A_row, const magma_index_t *A_col, const double * __restrict__ A_val, const magma_int_t L_nnz, const magma_index_t *L_row, const magma_index_t *L_rowidx, const magma_index_t *L_col, double *L_val, const magma_int_t U_nnz, const magma_index_t *U_row, const magma_index_t *U_rowidx, const magma_index_t *U_col, double *U_val) { int k = blockDim.x * blockIdx.x + threadIdx.x; double zero = MAGMA_D_MAKE(0.0, 0.0); int il, iu, jl, ju; if (k < L_nnz) { double s, sp; int row = L_rowidx[k]; int col = L_col[k]; // as we look at the lower triangular, // col<row, i.e. disregard last element in row if (row == col) { // end check whether part of L L_val[k] = MAGMA_D_ONE; // upper triangular has diagonal equal 1 } else { s = zero; // check whether A contains element in this location for (int i = A_row[row]; i<A_row[row+1]; i++) { if (A_col[i] == col) { s = A_val[i]; //break; } } //printf("k:%d row:%d val_A:%.2f\n", k, row, s); //now do the actual iteration il = L_row[row]; iu = U_row[col]; int endil = L_row[ row+1 ]; int endiu = U_row[ col+1 ]; do { sp = zero; jl = L_col[il]; ju = U_col[iu]; // avoid branching sp = ( jl == ju ) ? L_val[il] * U_val[iu] : sp; s = ( jl == ju ) ? s-sp : s; il = ( jl <= ju ) ? il+1 : il; iu = ( jl >= ju ) ? iu+1 : iu; } while (il < endil && iu < endiu); // undo the last operation (it must be the last) s += sp; // write back to location e L_val[k] = s / U_val[U_row[col+1]-1]; } } }// kernel __global__ void magma_dparilut_U_kernel( const magma_int_t num_rows, const magma_index_t *A_row, const magma_index_t *A_col, const double * __restrict__ A_val, const magma_int_t L_nnz, const magma_index_t *L_row, const magma_index_t *L_rowidx, const magma_index_t *L_col, double *L_val, const magma_int_t U_nnz, const magma_index_t *U_row, const magma_index_t *U_rowidx, const magma_index_t *U_col, double *U_val) { int k = blockDim.x * blockIdx.x + threadIdx.x; double zero = MAGMA_D_MAKE(0.0, 0.0); int il, iu, jl, ju; if (k < U_nnz) { double s, sp; int row = U_col[k]; int col = U_rowidx[k]; s = zero; // check whether A contains element in this location for (int i = A_row[row]; i<A_row[row+1]; i++) { if (A_col[i] == col) { s = A_val[i]; //break; } } //now do the actual iteration il = L_row[row]; iu = U_row[col]; int endil = L_row[ row+1 ]; int endiu = U_row[ col+1 ]; do { sp = zero; jl = L_col[il]; ju = U_col[iu]; // avoid branching sp = ( jl == ju ) ? L_val[il] * U_val[iu] : sp; s = ( jl == ju ) ? s-sp : s; il = ( jl <= ju ) ? il+1 : il; iu = ( jl >= ju ) ? iu+1 : iu; } while (il < endil && iu < endiu); // undo the last operation (it must be the last) s += sp; // write back to location e U_val[k] = s; } }// kernel /***************************************************************************//** Purpose ------- This function does an ParILUT sweep. The difference to the ParILU sweep is that the nonzero pattern of A and the incomplete factors L and U can be different. The pattern determing which elements are iterated are hence the pattern of L and U, not A. L has a unit diagonal. This is the GPU version of the asynchronous ParILUT sweep. 
Arguments --------- @param[in] A magma_d_matrix* System matrix. The format is sorted CSR. @param[in,out] L magma_d_matrix* Current approximation for the lower triangular factor The format is MAGMA_CSRCOO. This is sorted CSR plus the rowindexes being stored. @param[in,out] U magma_d_matrix* Current approximation for the lower triangular factor The format is MAGMA_CSRCOO. This is sorted CSR plus the rowindexes being stored. @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dparilut_sweep_gpu( magma_d_matrix *A, magma_d_matrix *L, magma_d_matrix *U, magma_queue_t queue ) { int blocksize1 = 128; int blocksize2 = 1; int dimgrid11 = magma_ceildiv( L->nnz, blocksize1 ); int dimgrid12 = 1; int dimgrid13 = 1; dim3 grid1( dimgrid11, dimgrid12, dimgrid13 ); dim3 block1( blocksize1, blocksize2, 1 ); int dimgrid21 = magma_ceildiv( U->nnz, blocksize1 ); int dimgrid22 = 1; int dimgrid23 = 1; dim3 grid2( dimgrid21, dimgrid22, dimgrid23 ); dim3 block2( blocksize1, blocksize2, 1 ); // Runtime API // hipFuncCachePreferShared: shared memory is 48 KB // hipFuncCachePreferEqual: shared memory is 32 KB // hipFuncCachePreferL1: shared memory is 16 KB // hipFuncCachePreferNone: no preference //hipFuncSetCacheConfig(hipFuncCachePreferShared); hipDeviceSetCacheConfig( hipFuncCachePreferL1 ); hipLaunchKernelGGL(( magma_dparilut_L_kernel), dim3(grid1), dim3(block1), 0, queue->cuda_stream() , A->num_rows, A->drow, A->dcol, A->dval, L->nnz, L->drow, L->drowidx, L->dcol, L->dval, U->nnz, U->drow, U->drowidx, U->dcol, U->dval); hipLaunchKernelGGL(( magma_dparilut_U_kernel), dim3(grid2), dim3(block2), 0, queue->cuda_stream() , A->num_rows, A->drow, A->dcol, A->dval, L->nnz, L->drow, L->drowidx, L->dcol, L->dval, U->nnz, U->drow, U->drowidx, U->dcol, U->dval); return MAGMA_SUCCESS; } __global__ void magma_dparilut_residuals_kernel( const magma_int_t num_rows, const magma_index_t *A_row, const magma_index_t *A_col, const double * __restrict__ A_val, const magma_index_t *L_row, const magma_index_t *L_col, const double *L_val, const magma_index_t *U_row, const magma_index_t *U_col, const double *U_val, const magma_int_t R_nnz, const magma_index_t *R_rowidx, const magma_index_t *R_col, double *R_val) { int k = blockDim.x * blockIdx.x + threadIdx.x; double zero = MAGMA_D_MAKE(0.0, 0.0); double s, sp; int il, iu, jl, ju; if (k < R_nnz) { int row = R_rowidx[k]; int col = R_col[k]; // as we look at the lower triangular, // col<row, i.e. disregard last element in row s = zero; // check whether A contains element in this location for (int i = A_row[row]; i<A_row[row+1]; i++) { if (A_col[i] == col) { s = A_val[i]; break; } } //now do the actual iteration il = L_row[row]; iu = U_row[col]; int endil = L_row[ row+1 ]; int endiu = U_row[ col+1 ]; do { sp = zero; jl = L_col[il]; ju = U_col[iu]; // avoid branching sp = ( jl == ju ) ? L_val[il] * U_val[iu] : sp; s = ( jl == ju ) ? s-sp : s; il = ( jl <= ju ) ? il+1 : il; iu = ( jl >= ju ) ? iu+1 : iu; } while (il < endil && iu < endiu); // undo the last operation (it must be the last) s += sp; // write back to location e R_val[k] = s; } }// kernel /***************************************************************************//** Purpose ------- This function computes the ILU residual in the locations included in the sparsity pattern of R. Arguments --------- @param[in] A magma_d_matrix System matrix. The format is sorted CSR. 
@param[in] L magma_d_matrix Current approximation for the lower triangular factor The format is MAGMA_CSRCOO. This is sorted CSR plus the rowindexes being stored. @param[in] U magma_d_matrix Current approximation for the lower triangular factor The format is MAGMA_CSRCOO. This is sorted CSR plus the rowindexes being stored. @param[in,out] R magma_d_matrix* Sparsity pattern on which the ILU residual is computed. R is in COO format. On output, R contains the ILU residual. @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dparilut_residuals_gpu( magma_d_matrix A, magma_d_matrix L, magma_d_matrix U, magma_d_matrix *R, magma_queue_t queue ) { int blocksize1 = 128; int blocksize2 = 1; int dimgrid11 = magma_ceildiv( R->nnz, blocksize1 ); int dimgrid12 = 1; int dimgrid13 = 1; dim3 grid1( dimgrid11, dimgrid12, dimgrid13 ); dim3 block1( blocksize1, blocksize2, 1 ); // Runtime API // hipFuncCachePreferShared: shared memory is 48 KB // hipFuncCachePreferEqual: shared memory is 32 KB // hipFuncCachePreferL1: shared memory is 16 KB // hipFuncCachePreferNone: no preference //hipFuncSetCacheConfig(hipFuncCachePreferShared); hipDeviceSetCacheConfig( hipFuncCachePreferL1 ); hipLaunchKernelGGL(( magma_dparilut_residuals_kernel), dim3(grid1), dim3(block1), 0, queue->cuda_stream(), A.num_rows, A.drow, A.dcol, A.dval, L.drow, L.dcol, L.dval, U.drow, U.dcol, U.dval, R->nnz, R->drowidx, R->dcol, R->dval); return MAGMA_SUCCESS; }
ab567cb91133f326dc1d5183b5ffddd0e11dcd0d.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zparilut_kernels.cu, normal z -> d, Wed Jan 2 14:18:53 2019 */ #include "magmasparse_internal.h" #define PRECISION_d __global__ void magma_dparilut_L_kernel( const magma_int_t num_rows, const magma_index_t *A_row, const magma_index_t *A_col, const double * __restrict__ A_val, const magma_int_t L_nnz, const magma_index_t *L_row, const magma_index_t *L_rowidx, const magma_index_t *L_col, double *L_val, const magma_int_t U_nnz, const magma_index_t *U_row, const magma_index_t *U_rowidx, const magma_index_t *U_col, double *U_val) { int k = blockDim.x * blockIdx.x + threadIdx.x; double zero = MAGMA_D_MAKE(0.0, 0.0); int il, iu, jl, ju; if (k < L_nnz) { double s, sp; int row = L_rowidx[k]; int col = L_col[k]; // as we look at the lower triangular, // col<row, i.e. disregard last element in row if (row == col) { // end check whether part of L L_val[k] = MAGMA_D_ONE; // upper triangular has diagonal equal 1 } else { s = zero; // check whether A contains element in this location for (int i = A_row[row]; i<A_row[row+1]; i++) { if (A_col[i] == col) { s = A_val[i]; //break; } } //printf("k:%d row:%d val_A:%.2f\n", k, row, s); //now do the actual iteration il = L_row[row]; iu = U_row[col]; int endil = L_row[ row+1 ]; int endiu = U_row[ col+1 ]; do { sp = zero; jl = L_col[il]; ju = U_col[iu]; // avoid branching sp = ( jl == ju ) ? L_val[il] * U_val[iu] : sp; s = ( jl == ju ) ? s-sp : s; il = ( jl <= ju ) ? il+1 : il; iu = ( jl >= ju ) ? iu+1 : iu; } while (il < endil && iu < endiu); // undo the last operation (it must be the last) s += sp; // write back to location e L_val[k] = s / U_val[U_row[col+1]-1]; } } }// kernel __global__ void magma_dparilut_U_kernel( const magma_int_t num_rows, const magma_index_t *A_row, const magma_index_t *A_col, const double * __restrict__ A_val, const magma_int_t L_nnz, const magma_index_t *L_row, const magma_index_t *L_rowidx, const magma_index_t *L_col, double *L_val, const magma_int_t U_nnz, const magma_index_t *U_row, const magma_index_t *U_rowidx, const magma_index_t *U_col, double *U_val) { int k = blockDim.x * blockIdx.x + threadIdx.x; double zero = MAGMA_D_MAKE(0.0, 0.0); int il, iu, jl, ju; if (k < U_nnz) { double s, sp; int row = U_col[k]; int col = U_rowidx[k]; s = zero; // check whether A contains element in this location for (int i = A_row[row]; i<A_row[row+1]; i++) { if (A_col[i] == col) { s = A_val[i]; //break; } } //now do the actual iteration il = L_row[row]; iu = U_row[col]; int endil = L_row[ row+1 ]; int endiu = U_row[ col+1 ]; do { sp = zero; jl = L_col[il]; ju = U_col[iu]; // avoid branching sp = ( jl == ju ) ? L_val[il] * U_val[iu] : sp; s = ( jl == ju ) ? s-sp : s; il = ( jl <= ju ) ? il+1 : il; iu = ( jl >= ju ) ? iu+1 : iu; } while (il < endil && iu < endiu); // undo the last operation (it must be the last) s += sp; // write back to location e U_val[k] = s; } }// kernel /***************************************************************************//** Purpose ------- This function does an ParILUT sweep. The difference to the ParILU sweep is that the nonzero pattern of A and the incomplete factors L and U can be different. The pattern determing which elements are iterated are hence the pattern of L and U, not A. L has a unit diagonal. This is the GPU version of the asynchronous ParILUT sweep. Arguments --------- @param[in] A magma_d_matrix* System matrix. The format is sorted CSR. 
@param[in,out] L magma_d_matrix* Current approximation for the lower triangular factor The format is MAGMA_CSRCOO. This is sorted CSR plus the rowindexes being stored. @param[in,out] U magma_d_matrix* Current approximation for the lower triangular factor The format is MAGMA_CSRCOO. This is sorted CSR plus the rowindexes being stored. @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dparilut_sweep_gpu( magma_d_matrix *A, magma_d_matrix *L, magma_d_matrix *U, magma_queue_t queue ) { int blocksize1 = 128; int blocksize2 = 1; int dimgrid11 = magma_ceildiv( L->nnz, blocksize1 ); int dimgrid12 = 1; int dimgrid13 = 1; dim3 grid1( dimgrid11, dimgrid12, dimgrid13 ); dim3 block1( blocksize1, blocksize2, 1 ); int dimgrid21 = magma_ceildiv( U->nnz, blocksize1 ); int dimgrid22 = 1; int dimgrid23 = 1; dim3 grid2( dimgrid21, dimgrid22, dimgrid23 ); dim3 block2( blocksize1, blocksize2, 1 ); // Runtime API // cudaFuncCachePreferShared: shared memory is 48 KB // cudaFuncCachePreferEqual: shared memory is 32 KB // cudaFuncCachePreferL1: shared memory is 16 KB // cudaFuncCachePreferNone: no preference //cudaFuncSetCacheConfig(cudaFuncCachePreferShared); cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 ); magma_dparilut_L_kernel<<< grid1, block1, 0, queue->cuda_stream() >>> (A->num_rows, A->drow, A->dcol, A->dval, L->nnz, L->drow, L->drowidx, L->dcol, L->dval, U->nnz, U->drow, U->drowidx, U->dcol, U->dval); magma_dparilut_U_kernel<<< grid2, block2, 0, queue->cuda_stream() >>> (A->num_rows, A->drow, A->dcol, A->dval, L->nnz, L->drow, L->drowidx, L->dcol, L->dval, U->nnz, U->drow, U->drowidx, U->dcol, U->dval); return MAGMA_SUCCESS; } __global__ void magma_dparilut_residuals_kernel( const magma_int_t num_rows, const magma_index_t *A_row, const magma_index_t *A_col, const double * __restrict__ A_val, const magma_index_t *L_row, const magma_index_t *L_col, const double *L_val, const magma_index_t *U_row, const magma_index_t *U_col, const double *U_val, const magma_int_t R_nnz, const magma_index_t *R_rowidx, const magma_index_t *R_col, double *R_val) { int k = blockDim.x * blockIdx.x + threadIdx.x; double zero = MAGMA_D_MAKE(0.0, 0.0); double s, sp; int il, iu, jl, ju; if (k < R_nnz) { int row = R_rowidx[k]; int col = R_col[k]; // as we look at the lower triangular, // col<row, i.e. disregard last element in row s = zero; // check whether A contains element in this location for (int i = A_row[row]; i<A_row[row+1]; i++) { if (A_col[i] == col) { s = A_val[i]; break; } } //now do the actual iteration il = L_row[row]; iu = U_row[col]; int endil = L_row[ row+1 ]; int endiu = U_row[ col+1 ]; do { sp = zero; jl = L_col[il]; ju = U_col[iu]; // avoid branching sp = ( jl == ju ) ? L_val[il] * U_val[iu] : sp; s = ( jl == ju ) ? s-sp : s; il = ( jl <= ju ) ? il+1 : il; iu = ( jl >= ju ) ? iu+1 : iu; } while (il < endil && iu < endiu); // undo the last operation (it must be the last) s += sp; // write back to location e R_val[k] = s; } }// kernel /***************************************************************************//** Purpose ------- This function computes the ILU residual in the locations included in the sparsity pattern of R. Arguments --------- @param[in] A magma_d_matrix System matrix. The format is sorted CSR. @param[in] L magma_d_matrix Current approximation for the lower triangular factor The format is MAGMA_CSRCOO. This is sorted CSR plus the rowindexes being stored. 
@param[in] U magma_d_matrix Current approximation for the lower triangular factor The format is MAGMA_CSRCOO. This is sorted CSR plus the rowindexes being stored. @param[in,out] R magma_d_matrix* Sparsity pattern on which the ILU residual is computed. R is in COO format. On output, R contains the ILU residual. @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dparilut_residuals_gpu( magma_d_matrix A, magma_d_matrix L, magma_d_matrix U, magma_d_matrix *R, magma_queue_t queue ) { int blocksize1 = 128; int blocksize2 = 1; int dimgrid11 = magma_ceildiv( R->nnz, blocksize1 ); int dimgrid12 = 1; int dimgrid13 = 1; dim3 grid1( dimgrid11, dimgrid12, dimgrid13 ); dim3 block1( blocksize1, blocksize2, 1 ); // Runtime API // cudaFuncCachePreferShared: shared memory is 48 KB // cudaFuncCachePreferEqual: shared memory is 32 KB // cudaFuncCachePreferL1: shared memory is 16 KB // cudaFuncCachePreferNone: no preference //cudaFuncSetCacheConfig(cudaFuncCachePreferShared); cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 ); magma_dparilut_residuals_kernel<<<grid1, block1, 0, queue->cuda_stream()>>> (A.num_rows, A.drow, A.dcol, A.dval, L.drow, L.dcol, L.dval, U.drow, U.dcol, U.dval, R->nnz, R->drowidx, R->dcol, R->dval); return MAGMA_SUCCESS; }
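The Purpose blocks above describe the sweep in words; as a compact reference for the arithmetic, here is a dense, host-side sketch of one ParILU-style fixed-point sweep (hypothetical helper, double precision, unit-diagonal L). The kernels magma_dparilut_L_kernel / magma_dparilut_U_kernel apply the same per-entry update, but only on the sparsity patterns of L and U, with the merged two-pointer loop over L(row,:) and U(:,col) standing in for the dense inner product.

#include <stddef.h>

/* One fixed-point ParILU sweep on dense n x n row-major arrays.
 * L is unit lower triangular (L[i][i] == 1 is implicit), U is upper triangular.
 *   i >  j :  L_ij = (A_ij - sum_{k<j} L_ik * U_kj) / U_jj
 *   i <= j :  U_ij =  A_ij - sum_{k<i} L_ik * U_kj                          */
static void parilu_sweep_dense(size_t n, const double *A, double *L, double *U) {
  for (size_t i = 0; i < n; ++i) {
    for (size_t j = 0; j < n; ++j) {
      double s = A[i * n + j];
      size_t kmax = (i < j) ? i : j;            /* sum runs over k < min(i, j) */
      for (size_t k = 0; k < kmax; ++k)
        s -= L[i * n + k] * U[k * n + j];
      if (i > j)
        L[i * n + j] = s / U[j * n + j];        /* strict lower part of L */
      else
        U[i * n + j] = s;                       /* upper part of U, incl. diagonal */
    }
  }
}

The residual kernel follows the same recipe, except that it evaluates A_ij - sum_k L_ik * U_kj on the sparsity pattern of R and stores the value instead of updating L or U.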
7cbfc6dfd19f6b61b6edb9200d57e46d0ee2d7f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2021-2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdint.h> #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/crop_and_resize_grad_image_impl.cuh" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh" #include "include/cuda_runtime.h" #include "include/hip/hip_fp16.h" template <typename T, typename G> __global__ void CropAndResizeGradImageForwardKernel(const int32_t size, const T *grads, const T *boxes, const int *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, G *grad_image) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { // input format -> [num_boxes, crop_height, crop_width, depth] int pos_temp = pos; const int32_t pos_channel = pos_temp % depth; pos_temp = pos_temp / depth; const int32_t pos_x = pos_temp % crop_width; pos_temp = pos_temp / crop_width; const int32_t pos_y = pos_temp % crop_height; const int32_t pos_box_idx = pos_temp / crop_height; const T y1 = boxes[4 * pos_box_idx]; const T x1 = boxes[4 * pos_box_idx + 1]; const T y2 = boxes[4 * pos_box_idx + 2]; const T x2 = boxes[4 * pos_box_idx + 3]; const int32_t box_in_image = box_ind[pos_box_idx]; if (box_in_image < 0 || box_in_image >= batch) { continue; } const float height_scale = (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1) : 0; const float width_scale = (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1) : 0; float target_y = (crop_height > 1) ? y1 * (image_height - 1) + pos_y * height_scale : 0.5 * (y1 + y2) * (image_height - 1); float target_x = (crop_width > 1) ? 
x1 * (image_width - 1) + pos_x * width_scale : 0.5 * (x1 + x2) * (image_width - 1); if (target_y < 0 || target_y > image_height - 1) { continue; } if (target_x < 0 || target_x > image_width - 1) { continue; } if ((method == 1) || (method == 3)) { const int32_t top_y_index = floorf(target_y); const int32_t bottom_y_index = ceilf(target_y); const float y_lerp = target_y - top_y_index; const int32_t left_x_ind = floorf(target_x); const int32_t right_x_ind = ceilf(target_x); const float x_lerp = target_x - left_x_ind; // Compute the image gradient const float top_grad = (1 - y_lerp) * static_cast<float>(grads[pos]); MsAtomicAdd( grad_image + ((box_in_image * image_height + top_y_index) * image_width + left_x_ind) * depth + pos_channel, static_cast<G>((1 - x_lerp) * top_grad)); MsAtomicAdd( grad_image + ((box_in_image * image_height + top_y_index) * image_width + right_x_ind) * depth + pos_channel, static_cast<G>(x_lerp * top_grad)); const float bottom_grad = y_lerp * static_cast<float>(grads[pos]); MsAtomicAdd( grad_image + ((box_in_image * image_height + bottom_y_index) * image_width + left_x_ind) * depth + pos_channel, static_cast<G>((1 - x_lerp) * bottom_grad)); MsAtomicAdd( grad_image + ((box_in_image * image_height + bottom_y_index) * image_width + right_x_ind) * depth + pos_channel, static_cast<G>(x_lerp * bottom_grad)); } else { const int32_t closest_x_index = roundf(target_x); const int32_t closest_y_index = roundf(target_y); MsAtomicAdd(grad_image + ((box_in_image * image_height + closest_y_index) * image_width + closest_x_index) * depth + pos_channel, static_cast<G>(grads[pos])); } } return; } template <> __global__ void CropAndResizeGradImageForwardKernel(const int32_t size, const float *grads, const float *boxes, const int *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, half *grad_image) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { // input format -> [num_boxes, crop_height, crop_width, depth] int pos_temp = pos; const int32_t pos_channel = pos_temp % depth; pos_temp = pos_temp / depth; const int32_t pos_x = pos_temp % crop_width; pos_temp = pos_temp / crop_width; const int32_t pos_y = pos_temp % crop_height; const int32_t pos_box_idx = pos_temp / crop_height; const float y1 = boxes[4 * pos_box_idx]; const float x1 = boxes[4 * pos_box_idx + 1]; const float y2 = boxes[4 * pos_box_idx + 2]; const float x2 = boxes[4 * pos_box_idx + 3]; const int32_t box_in_image = box_ind[pos_box_idx]; if (box_in_image < 0 || box_in_image >= batch) { continue; } const float height_scale = (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1) : 0; const float width_scale = (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1) : 0; float target_y = (crop_height > 1) ? y1 * (image_height - 1) + pos_y * height_scale : 0.5 * (y1 + y2) * (image_height - 1); float target_x = (crop_width > 1) ? 
x1 * (image_width - 1) + pos_x * width_scale : 0.5 * (x1 + x2) * (image_width - 1); if (target_y < 0 || target_y > image_height - 1) { continue; } if (target_x < 0 || target_x > image_width - 1) { continue; } if ((method == 1) || (method == 3)) { const int32_t top_y_index = floorf(target_y); const int32_t bottom_y_index = ceilf(target_y); const float y_lerp = target_y - top_y_index; const int32_t left_x_ind = floorf(target_x); const int32_t right_x_ind = ceilf(target_x); const float x_lerp = target_x - left_x_ind; // Compute the image gradient const float top_grad = (1 - y_lerp) * grads[pos]; MsAtomicAdd( grad_image + ((box_in_image * image_height + top_y_index) * image_width + left_x_ind) * depth + pos_channel, __float2half((1 - x_lerp) * top_grad)); MsAtomicAdd( grad_image + ((box_in_image * image_height + top_y_index) * image_width + right_x_ind) * depth + pos_channel, __float2half(x_lerp * top_grad)); const float bottom_grad = y_lerp * grads[pos]; MsAtomicAdd( grad_image + ((box_in_image * image_height + bottom_y_index) * image_width + left_x_ind) * depth + pos_channel, __float2half((1 - x_lerp) * bottom_grad)); MsAtomicAdd( grad_image + ((box_in_image * image_height + bottom_y_index) * image_width + right_x_ind) * depth + pos_channel, __float2half(x_lerp * bottom_grad)); } else { const int32_t closest_x_index = roundf(target_x); const int32_t closest_y_index = roundf(target_y); MsAtomicAdd(grad_image + ((box_in_image * image_height + closest_y_index) * image_width + closest_x_index) * depth + pos_channel, __float2half(grads[pos])); } } return; } template <> __global__ void CropAndResizeGradImageForwardKernel(const int32_t size, const double *grads, const double *boxes, const int *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, half *grad_image) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { // input format -> [num_boxes, crop_height, crop_width, depth] int pos_temp = pos; const int32_t pos_channel = pos_temp % depth; pos_temp = pos_temp / depth; const int32_t pos_x = pos_temp % crop_width; pos_temp = pos_temp / crop_width; const int32_t pos_y = pos_temp % crop_height; const int32_t pos_box_idx = pos_temp / crop_height; const double y1 = boxes[4 * pos_box_idx]; const double x1 = boxes[4 * pos_box_idx + 1]; const double y2 = boxes[4 * pos_box_idx + 2]; const double x2 = boxes[4 * pos_box_idx + 3]; const int32_t box_in_image = box_ind[pos_box_idx]; if (box_in_image < 0 || box_in_image >= batch) { continue; } const double height_scale = (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1) : 0; const double width_scale = (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1) : 0; double target_y = (crop_height > 1) ? y1 * (image_height - 1) + pos_y * height_scale : 0.5 * (y1 + y2) * (image_height - 1); double target_x = (crop_width > 1) ? 
x1 * (image_width - 1) + pos_x * width_scale : 0.5 * (x1 + x2) * (image_width - 1); if (target_y < 0 || target_y > image_height - 1) { continue; } if (target_x < 0 || target_x > image_width - 1) { continue; } if ((method == 1) || (method == 3)) { const int32_t top_y_index = floorf(target_y); const int32_t bottom_y_index = ceilf(target_y); const float y_lerp = static_cast<float>(target_y - top_y_index); const int32_t left_x_ind = floorf(target_x); const int32_t right_x_ind = ceilf(target_x); const float x_lerp = static_cast<float>(target_x - left_x_ind); // Compute the image gradient const float top_grad = (1 - y_lerp) * static_cast<float>(grads[pos]); MsAtomicAdd( grad_image + ((box_in_image * image_height + top_y_index) * image_width + left_x_ind) * depth + pos_channel, __float2half((1 - x_lerp) * top_grad)); MsAtomicAdd( grad_image + ((box_in_image * image_height + top_y_index) * image_width + right_x_ind) * depth + pos_channel, __float2half(x_lerp * top_grad)); const float bottom_grad = y_lerp * static_cast<float>(grads[pos]); MsAtomicAdd( grad_image + ((box_in_image * image_height + bottom_y_index) * image_width + left_x_ind) * depth + pos_channel, __float2half((1 - x_lerp) * bottom_grad)); MsAtomicAdd( grad_image + ((box_in_image * image_height + bottom_y_index) * image_width + right_x_ind) * depth + pos_channel, __float2half(x_lerp * bottom_grad)); } else { const int32_t closest_x_index = roundf(target_x); const int32_t closest_y_index = roundf(target_y); MsAtomicAdd(grad_image + ((box_in_image * image_height + closest_y_index) * image_width + closest_x_index) * depth + pos_channel, __float2half(static_cast<float>(grads[pos]))); } } return; } template <typename G> __global__ void Reset_zero(const int size, G *list) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { const G replace_element = 0.; list[pos] = replace_element; } return; } template <typename T, typename G> void CalCropAndResizeGradImage(const int32_t size, const T *grads, const T *boxes, const int *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, G *grad_image, const uint32_t &device_id, hipStream_t cuda_stream) { int zero_threads_num = static_cast<int>(batch * image_height * image_width * depth); hipLaunchKernelGGL(( Reset_zero), dim3(CUDA_BLOCKS(device_id, zero_threads_num)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, zero_threads_num, grad_image); hipLaunchKernelGGL(( CropAndResizeGradImageForwardKernel), dim3(CUDA_BLOCKS(device_id, static_cast<int>(size))), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size, grads, boxes, box_ind, num_boxes, batch, image_height, image_width, crop_height, crop_width, depth, method, grad_image); return; } template CUDA_LIB_EXPORT void CalCropAndResizeGradImage<float, half>( const int32_t size, const float *grads, const float *boxes, const int32_t *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, half *grad_image, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalCropAndResizeGradImage<float, float>( const int32_t size, const float *grads, const float *boxes, const int32_t *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, float *grad_image, const uint32_t &device_id, hipStream_t 
cuda_stream); template CUDA_LIB_EXPORT void CalCropAndResizeGradImage<float, double>( const int32_t size, const float *grads, const float *boxes, const int32_t *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, double *grad_image, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalCropAndResizeGradImage<double, half>( const int32_t size, const double *grads, const double *boxes, const int32_t *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, half *grad_image, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalCropAndResizeGradImage<double, float>( const int32_t size, const double *grads, const double *boxes, const int32_t *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, float *grad_image, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalCropAndResizeGradImage<double, double>( const int32_t size, const double *grads, const double *boxes, const int32_t *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, double *grad_image, const uint32_t &device_id, hipStream_t cuda_stream);
7cbfc6dfd19f6b61b6edb9200d57e46d0ee2d7f5.cu
/** * Copyright 2021-2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdint.h> #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/crop_and_resize_grad_image_impl.cuh" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh" #include "include/cuda_runtime.h" #include "include/cuda_fp16.h" template <typename T, typename G> __global__ void CropAndResizeGradImageForwardKernel(const int32_t size, const T *grads, const T *boxes, const int *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, G *grad_image) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { // input format -> [num_boxes, crop_height, crop_width, depth] int pos_temp = pos; const int32_t pos_channel = pos_temp % depth; pos_temp = pos_temp / depth; const int32_t pos_x = pos_temp % crop_width; pos_temp = pos_temp / crop_width; const int32_t pos_y = pos_temp % crop_height; const int32_t pos_box_idx = pos_temp / crop_height; const T y1 = boxes[4 * pos_box_idx]; const T x1 = boxes[4 * pos_box_idx + 1]; const T y2 = boxes[4 * pos_box_idx + 2]; const T x2 = boxes[4 * pos_box_idx + 3]; const int32_t box_in_image = box_ind[pos_box_idx]; if (box_in_image < 0 || box_in_image >= batch) { continue; } const float height_scale = (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1) : 0; const float width_scale = (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1) : 0; float target_y = (crop_height > 1) ? y1 * (image_height - 1) + pos_y * height_scale : 0.5 * (y1 + y2) * (image_height - 1); float target_x = (crop_width > 1) ? 
x1 * (image_width - 1) + pos_x * width_scale : 0.5 * (x1 + x2) * (image_width - 1); if (target_y < 0 || target_y > image_height - 1) { continue; } if (target_x < 0 || target_x > image_width - 1) { continue; } if ((method == 1) || (method == 3)) { const int32_t top_y_index = floorf(target_y); const int32_t bottom_y_index = ceilf(target_y); const float y_lerp = target_y - top_y_index; const int32_t left_x_ind = floorf(target_x); const int32_t right_x_ind = ceilf(target_x); const float x_lerp = target_x - left_x_ind; // Compute the image gradient const float top_grad = (1 - y_lerp) * static_cast<float>(grads[pos]); MsAtomicAdd( grad_image + ((box_in_image * image_height + top_y_index) * image_width + left_x_ind) * depth + pos_channel, static_cast<G>((1 - x_lerp) * top_grad)); MsAtomicAdd( grad_image + ((box_in_image * image_height + top_y_index) * image_width + right_x_ind) * depth + pos_channel, static_cast<G>(x_lerp * top_grad)); const float bottom_grad = y_lerp * static_cast<float>(grads[pos]); MsAtomicAdd( grad_image + ((box_in_image * image_height + bottom_y_index) * image_width + left_x_ind) * depth + pos_channel, static_cast<G>((1 - x_lerp) * bottom_grad)); MsAtomicAdd( grad_image + ((box_in_image * image_height + bottom_y_index) * image_width + right_x_ind) * depth + pos_channel, static_cast<G>(x_lerp * bottom_grad)); } else { const int32_t closest_x_index = roundf(target_x); const int32_t closest_y_index = roundf(target_y); MsAtomicAdd(grad_image + ((box_in_image * image_height + closest_y_index) * image_width + closest_x_index) * depth + pos_channel, static_cast<G>(grads[pos])); } } return; } template <> __global__ void CropAndResizeGradImageForwardKernel(const int32_t size, const float *grads, const float *boxes, const int *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, half *grad_image) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { // input format -> [num_boxes, crop_height, crop_width, depth] int pos_temp = pos; const int32_t pos_channel = pos_temp % depth; pos_temp = pos_temp / depth; const int32_t pos_x = pos_temp % crop_width; pos_temp = pos_temp / crop_width; const int32_t pos_y = pos_temp % crop_height; const int32_t pos_box_idx = pos_temp / crop_height; const float y1 = boxes[4 * pos_box_idx]; const float x1 = boxes[4 * pos_box_idx + 1]; const float y2 = boxes[4 * pos_box_idx + 2]; const float x2 = boxes[4 * pos_box_idx + 3]; const int32_t box_in_image = box_ind[pos_box_idx]; if (box_in_image < 0 || box_in_image >= batch) { continue; } const float height_scale = (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1) : 0; const float width_scale = (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1) : 0; float target_y = (crop_height > 1) ? y1 * (image_height - 1) + pos_y * height_scale : 0.5 * (y1 + y2) * (image_height - 1); float target_x = (crop_width > 1) ? 
x1 * (image_width - 1) + pos_x * width_scale : 0.5 * (x1 + x2) * (image_width - 1); if (target_y < 0 || target_y > image_height - 1) { continue; } if (target_x < 0 || target_x > image_width - 1) { continue; } if ((method == 1) || (method == 3)) { const int32_t top_y_index = floorf(target_y); const int32_t bottom_y_index = ceilf(target_y); const float y_lerp = target_y - top_y_index; const int32_t left_x_ind = floorf(target_x); const int32_t right_x_ind = ceilf(target_x); const float x_lerp = target_x - left_x_ind; // Compute the image gradient const float top_grad = (1 - y_lerp) * grads[pos]; MsAtomicAdd( grad_image + ((box_in_image * image_height + top_y_index) * image_width + left_x_ind) * depth + pos_channel, __float2half((1 - x_lerp) * top_grad)); MsAtomicAdd( grad_image + ((box_in_image * image_height + top_y_index) * image_width + right_x_ind) * depth + pos_channel, __float2half(x_lerp * top_grad)); const float bottom_grad = y_lerp * grads[pos]; MsAtomicAdd( grad_image + ((box_in_image * image_height + bottom_y_index) * image_width + left_x_ind) * depth + pos_channel, __float2half((1 - x_lerp) * bottom_grad)); MsAtomicAdd( grad_image + ((box_in_image * image_height + bottom_y_index) * image_width + right_x_ind) * depth + pos_channel, __float2half(x_lerp * bottom_grad)); } else { const int32_t closest_x_index = roundf(target_x); const int32_t closest_y_index = roundf(target_y); MsAtomicAdd(grad_image + ((box_in_image * image_height + closest_y_index) * image_width + closest_x_index) * depth + pos_channel, __float2half(grads[pos])); } } return; } template <> __global__ void CropAndResizeGradImageForwardKernel(const int32_t size, const double *grads, const double *boxes, const int *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, half *grad_image) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { // input format -> [num_boxes, crop_height, crop_width, depth] int pos_temp = pos; const int32_t pos_channel = pos_temp % depth; pos_temp = pos_temp / depth; const int32_t pos_x = pos_temp % crop_width; pos_temp = pos_temp / crop_width; const int32_t pos_y = pos_temp % crop_height; const int32_t pos_box_idx = pos_temp / crop_height; const double y1 = boxes[4 * pos_box_idx]; const double x1 = boxes[4 * pos_box_idx + 1]; const double y2 = boxes[4 * pos_box_idx + 2]; const double x2 = boxes[4 * pos_box_idx + 3]; const int32_t box_in_image = box_ind[pos_box_idx]; if (box_in_image < 0 || box_in_image >= batch) { continue; } const double height_scale = (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1) : 0; const double width_scale = (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1) : 0; double target_y = (crop_height > 1) ? y1 * (image_height - 1) + pos_y * height_scale : 0.5 * (y1 + y2) * (image_height - 1); double target_x = (crop_width > 1) ? 
x1 * (image_width - 1) + pos_x * width_scale : 0.5 * (x1 + x2) * (image_width - 1); if (target_y < 0 || target_y > image_height - 1) { continue; } if (target_x < 0 || target_x > image_width - 1) { continue; } if ((method == 1) || (method == 3)) { const int32_t top_y_index = floorf(target_y); const int32_t bottom_y_index = ceilf(target_y); const float y_lerp = static_cast<float>(target_y - top_y_index); const int32_t left_x_ind = floorf(target_x); const int32_t right_x_ind = ceilf(target_x); const float x_lerp = static_cast<float>(target_x - left_x_ind); // Compute the image gradient const float top_grad = (1 - y_lerp) * static_cast<float>(grads[pos]); MsAtomicAdd( grad_image + ((box_in_image * image_height + top_y_index) * image_width + left_x_ind) * depth + pos_channel, __float2half((1 - x_lerp) * top_grad)); MsAtomicAdd( grad_image + ((box_in_image * image_height + top_y_index) * image_width + right_x_ind) * depth + pos_channel, __float2half(x_lerp * top_grad)); const float bottom_grad = y_lerp * static_cast<float>(grads[pos]); MsAtomicAdd( grad_image + ((box_in_image * image_height + bottom_y_index) * image_width + left_x_ind) * depth + pos_channel, __float2half((1 - x_lerp) * bottom_grad)); MsAtomicAdd( grad_image + ((box_in_image * image_height + bottom_y_index) * image_width + right_x_ind) * depth + pos_channel, __float2half(x_lerp * bottom_grad)); } else { const int32_t closest_x_index = roundf(target_x); const int32_t closest_y_index = roundf(target_y); MsAtomicAdd(grad_image + ((box_in_image * image_height + closest_y_index) * image_width + closest_x_index) * depth + pos_channel, __float2half(static_cast<float>(grads[pos]))); } } return; } template <typename G> __global__ void Reset_zero(const int size, G *list) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { const G replace_element = 0.; list[pos] = replace_element; } return; } template <typename T, typename G> void CalCropAndResizeGradImage(const int32_t size, const T *grads, const T *boxes, const int *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, G *grad_image, const uint32_t &device_id, cudaStream_t cuda_stream) { int zero_threads_num = static_cast<int>(batch * image_height * image_width * depth); Reset_zero<<<CUDA_BLOCKS(device_id, zero_threads_num), CUDA_THREADS(device_id), 0, cuda_stream>>>(zero_threads_num, grad_image); CropAndResizeGradImageForwardKernel<<<CUDA_BLOCKS(device_id, static_cast<int>(size)), CUDA_THREADS(device_id), 0, cuda_stream>>>(size, grads, boxes, box_ind, num_boxes, batch, image_height, image_width, crop_height, crop_width, depth, method, grad_image); return; } template CUDA_LIB_EXPORT void CalCropAndResizeGradImage<float, half>( const int32_t size, const float *grads, const float *boxes, const int32_t *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, half *grad_image, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalCropAndResizeGradImage<float, float>( const int32_t size, const float *grads, const float *boxes, const int32_t *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, float *grad_image, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void 
CalCropAndResizeGradImage<float, double>( const int32_t size, const float *grads, const float *boxes, const int32_t *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, double *grad_image, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalCropAndResizeGradImage<double, half>( const int32_t size, const double *grads, const double *boxes, const int32_t *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, half *grad_image, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalCropAndResizeGradImage<double, float>( const int32_t size, const double *grads, const double *boxes, const int32_t *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, float *grad_image, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalCropAndResizeGradImage<double, double>( const int32_t size, const double *grads, const double *boxes, const int32_t *box_ind, int32_t num_boxes, int32_t batch, int32_t image_height, int32_t image_width, int32_t crop_height, int32_t crop_width, int32_t depth, int method, double *grad_image, const uint32_t &device_id, cudaStream_t cuda_stream);
a1d14987c349cdcfd1ed122f89188b9ccc4f9df0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/filler.hpp" #include "caffe/layers/class_distance_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { ////////////////////////////////////////// template <typename Dtype> static __global__ void compute_top_l2(const int nthreads, const int N_, const int K_, const Dtype *bottom_data, Dtype *top_data, const Dtype *weight, const Dtype *sigma, Dtype *dist, bool ignore_zero, bool isotropic) { if (ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; const int j = index % N_; Dtype t = 0; for (int k = 0; k < K_; ++k) if (bottom_data[i*K_ + k]) { Dtype d = weight[j*K_ + k] - bottom_data[i*K_ + k]; t += d*d; } dist[index] = t; top_data[index] = Dtype(-0.5) * t / (max(sigma[j], Dtype(0)) + Dtype(0.0001)); } } else { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; const int j = index % N_; Dtype t = 0; for (int k = 0; k < K_; ++k) { Dtype d = weight[j*K_ + k] - bottom_data[i*K_ + k]; if (isotropic) t += d*d; else t += d*d/max(sigma[j*K_ + k], Dtype(0) + Dtype(0.00000001)); } dist[index] = t; // only useful for 'isotropic' if (isotropic) top_data[index] = Dtype(-0.5) * t / (max(sigma[j], Dtype(0)) + Dtype(0.0001)); else top_data[index] = Dtype(-0.5) * t; } } } template <typename Dtype> static __global__ void compute_top_ip(const int nthreads, const int N_, const int K_, const Dtype *bottom_data, Dtype *top_data, const Dtype *weight) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; const int j = index % N_; Dtype t = 0; for (int k = 0; k < K_; ++k) { t += weight[j*K_ + k] * bottom_data[i*K_ + k]; } top_data[index] = t; } } template <typename Dtype> static __global__ void compute_top_l1(const int nthreads, const int N_, const int K_, const Dtype *bottom_data, Dtype *top_data, const Dtype *weight, bool ignore_zero) { if (ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; const int j = index % N_; Dtype t = 0; for (int k = 0; k < K_; ++k) { Dtype d = weight[j*K_ + k] - bottom_data[i*K_ + k]; t += abs(d); } top_data[index] = -t; } } else { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; const int j = index % N_; Dtype t = 0; for (int k = 0; k < K_; ++k) { Dtype d = weight[j*K_ + k] - bottom_data[i*K_ + k]; t += abs(d); } top_data[index] = -t; } } } ////////////////////////////////////////// template <typename Dtype> static __global__ void margin_top(const int M_, const int N_, Dtype *top_data, const Dtype *label, const Dtype margin_mul, const Dtype margin_add) { CUDA_KERNEL_LOOP(i, M_) { const int y = (int)label[i]; top_data[i*N_ + y] += top_data[i*N_ + y] * margin_mul - margin_add; } } ////////////////////////////////////////// template <typename Dtype> void ClassDistanceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); const Dtype* sigma = this->blobs_[1]->gpu_data(); Dtype* dist = dist_.mutable_gpu_data(); const ClassDistanceParameter& param = this->layer_param_.class_distance_param(); bool isotropic = param.isotropic(); switch (param.metric()) { case ClassDistanceParameter_Metric_L2: compute_top_l2<Dtype> << <CAFFE_GET_BLOCKS(M_*N_), CAFFE_CUDA_NUM_THREADS >> >( M_*N_, N_, K_, bottom_data, top_data, weight, sigma, dist, param.ignore_zero() & (this->phase_ == TRAIN), isotropic); break; 
case ClassDistanceParameter_Metric_IP: compute_top_ip<Dtype> << <CAFFE_GET_BLOCKS(M_*N_), CAFFE_CUDA_NUM_THREADS >> >( M_*N_, N_, K_, bottom_data, top_data, weight); break; case ClassDistanceParameter_Metric_L1: compute_top_l1<Dtype> << <CAFFE_GET_BLOCKS(M_*N_), CAFFE_CUDA_NUM_THREADS >> >( M_*N_, N_, K_, bottom_data, top_data, weight, param.ignore_zero() & (this->phase_ == TRAIN)); break; } if (bottom.size() == 2 && this->phase_ == TRAIN) { Dtype margin_mul_ = this->m_mul_.get_iter("mul_margin"); Dtype margin_add_ = this->m_add_.get_iter("add_margin"); const Dtype* label = bottom[1]->gpu_data(); margin_top<Dtype> << <CAFFE_GET_BLOCKS(M_), CAFFE_CUDA_NUM_THREADS >> >( M_, N_, top_data, label, margin_mul_, margin_add_); } // validate that sigma > 0 const Dtype *sigma_cpu = this->blobs_[1]->cpu_data(); const int sigma_number = isotropic?N_:(N_*K_); for(int i=0; i<sigma_number; i++) if (sigma_cpu[i] <= eps_) { LOG(INFO) << "Dangerous sigma value, sigma[" << i << "]=" << sigma_cpu[i]; break; } /*if (bias_term_) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., bias_multiplier_.gpu_data(), this->blobs_[1]->gpu_data(), (Dtype)1., top_data); }*/ } //========================================== template <typename Dtype> static __global__ void compute_gradient_bottom_label_l2(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, const Dtype *label, const Dtype margin_mul, const Dtype center_coef, const Dtype *sigma, bool ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) if (!ignore_zero || bottom_data[index]) { if (j == (int)label[i]) t += (weight[j*K_ + k] - bottom_data[index]) * (margin_mul / (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * top_diff[i*N_ + j] - center_coef); else t += (weight[j*K_ + k] - bottom_data[index]) / (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * top_diff[i*N_ + j]; } bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_label_l2(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, const Dtype* label, const Dtype margin_mul, const Dtype center_coef, const Dtype *sigma, Dtype *sigma_diff, const Dtype *dist, bool update_sigma, bool ignore_zero) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; Dtype t_sigma = 0; for (int i = 0; i < M_; ++i) if (!ignore_zero || bottom_data[i*K_ + k]) { if (j == (int)label[i]){ t += (bottom_data[i*K_ + k] - weight[index]) * (margin_mul / (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * top_diff[i*N_ + j] - center_coef); if (update_sigma && k==0) t_sigma += dist[i * N_ + j] * margin_mul / (Dtype(2.0) * (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * sigma[j]) * top_diff[i*N_ + j]; } else{ t += (bottom_data[i*K_ + k] - weight[index]) / (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * top_diff[i*N_ + j]; if (update_sigma && k==0) t_sigma += dist[i * N_ + j] / (Dtype(2.0) * (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * sigma[j]) * top_diff[i*N_ + j]; } } weight_diff[index] += t; if (update_sigma && k == 0) sigma_diff[j] += t_sigma; } } template <typename Dtype> static __global__ void compute_gradient_bottom_label_l2_diag(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, 
const Dtype *label, const Dtype margin_mul, const Dtype center_coef, const Dtype *sigma, bool ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) if (!ignore_zero || bottom_data[index]) { if (j == (int)label[i]) t += (weight[j*K_ + k] - bottom_data[index]) * (margin_mul / (max(sigma[j*K_ + k] , Dtype(0)) + Dtype(0.00000001)) * top_diff[i*N_ + j] - center_coef); else t += (weight[j*K_ + k] - bottom_data[index]) / (max(sigma[j*K_ + k], Dtype(0)) + Dtype(0.00000001)) * top_diff[i*N_ + j]; } bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_label_l2_diag(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, const Dtype* label, const Dtype margin_mul, const Dtype center_coef, const Dtype *sigma, Dtype *sigma_diff, bool update_sigma, bool ignore_zero) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; Dtype t_sigma = 0; Dtype d = 0; Dtype safe_sigma = max(sigma[index], Dtype(0)) + Dtype(0.0001); for (int i = 0; i < M_; ++i) if (!ignore_zero || bottom_data[i*K_ + k]) { d = bottom_data[i*K_ + k] - weight[index]; if (j == (int)label[i]){ t += d * (margin_mul / safe_sigma * top_diff[i*N_ + j] - center_coef); if (update_sigma) t_sigma += d * d * margin_mul / (Dtype(2.0) * safe_sigma * safe_sigma) * top_diff[i*N_ + j]; } else{ t += d / safe_sigma * top_diff[i*N_ + j]; if (update_sigma) t_sigma += d * d / (Dtype(2.0) * safe_sigma * safe_sigma) * top_diff[i*N_ + j]; } } weight_diff[index] += t; if (update_sigma) sigma_diff[index] += t_sigma; } } template <typename Dtype> static __global__ void compute_gradient_bottom_label_ip(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, const Dtype *label, const Dtype margin_mul, const Dtype center_coef) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) { if (j == (int)label[i]) t += weight[j*K_ + k] * margin_mul * top_diff[i*N_ + j] + (bottom_data[index] - weight[j*K_ + k]) * center_coef; else t += weight[j*K_ + k] * top_diff[i*N_ + j]; } bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_label_ip(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, const Dtype* label, const Dtype margin_mul, const Dtype center_coef) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; for (int i = 0; i < M_; ++i) { if (j == (int)label[i]) t += margin_mul * bottom_data[i*K_ + k] * top_diff[i*N_ + j] + (weight[index] - bottom_data[i*K_ + k]) * center_coef; else t += bottom_data[i*K_ + k] * top_diff[i*N_ + j]; } weight_diff[index] += t; } } template <typename Dtype> static __global__ void compute_gradient_bottom_label_l1(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, const Dtype *label, const Dtype margin_mul, const Dtype center_coef, bool ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) if (!ignore_zero || 
bottom_data[index]) { if (j == (int)label[i]) t += ((weight[j*K_ + k] > bottom_data[index]) - (bottom_data[index] > weight[j*K_ + k])) * (margin_mul * top_diff[i*N_ + j] - center_coef); else t += ((weight[j*K_ + k] > bottom_data[index]) - (bottom_data[index] > weight[j*K_ + k])) * top_diff[i*N_ + j]; } bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_label_l1(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, const Dtype* label, const Dtype margin_mul, const Dtype center_coef, bool ignore_zero) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; for (int i = 0; i < M_; ++i) if (!ignore_zero || bottom_data[i*K_ + k]) { if (j == (int)label[i]) t += ((bottom_data[i*K_ + k] - weight[index]) - (weight[index] > bottom_data[i*K_ + k]) - (bottom_data[index] > weight[j*K_ + k])) * (margin_mul * top_diff[i*N_ + j] - center_coef); else t += ((bottom_data[i*K_ + k] - weight[index]) - (weight[index] > bottom_data[i*K_ + k]) - (bottom_data[index] > weight[j*K_ + k])) * top_diff[i*N_ + j]; } weight_diff[index] += t; } } template <typename Dtype> static __global__ void compute_gradient_bottom_l2(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, bool ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) if (!ignore_zero || bottom_data[index]) t += (weight[j*K_ + k] - bottom_data[index]) * top_diff[i*N_ + j]; bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_l2(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, bool ignore_zero) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; for (int i = 0; i < M_; ++i) if (!ignore_zero || bottom_data[i*K_ + k]) t += (bottom_data[i*K_ + k] - weight[index]) * top_diff[i*N_ + j]; weight_diff[index] += t; } } template <typename Dtype> static __global__ void compute_gradient_bottom_ip(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) t += weight[j*K_ + k] * top_diff[i*N_ + j]; bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_ip(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; for (int i = 0; i < M_; ++i) t += bottom_data[i*K_ + k] * top_diff[i*N_ + j]; weight_diff[index] += t; } } template <typename Dtype> static __global__ void compute_gradient_bottom_l1(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, bool ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) if (!ignore_zero || bottom_data[index]) t += 
((weight[j*K_ + k] > bottom_data[index]) - (bottom_data[index] > weight[j*K_ + k])) * top_diff[i*N_ + j]; bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_l1(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, bool ignore_zero) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; for (int i = 0; i < M_; ++i) if (!ignore_zero || bottom_data[i*K_ + k]) t += ((bottom_data[i*K_ + k] - weight[index]) - (weight[index] > bottom_data[i*K_ + k]) - (bottom_data[index] > weight[j*K_ + k])) * top_diff[i*N_ + j]; weight_diff[index] += t; } } template <typename Dtype> void ClassDistanceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); /*const*/ Dtype* weight = this->blobs_[0]->mutable_gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); const Dtype* sigma = this->blobs_[1]->gpu_data(); const Dtype* dist = dist_.gpu_data(); Dtype* sigma_diff = this->blobs_[1]->mutable_gpu_diff(); const ClassDistanceParameter& param = this->layer_param_.class_distance_param(); bool ignore_zero = param.ignore_zero(); bool update_sigma = param.update_sigma(); bool isotropic = param.isotropic(); if (isotropic) caffe_gpu_set(N_, (Dtype)0, sigma_diff); else caffe_gpu_set(N_*K_, (Dtype)0, sigma_diff); if (bottom.size() == 2) { const Dtype* label = bottom[1]->gpu_data(); const Dtype center_coef_ = param.center_coef() / M_; const Dtype margin_mul_1 = 1 + (param.block_mul_grad() ? 
0 : m_mul_.get()); switch (param.metric()) { case ClassDistanceParameter_Metric_L2: if (isotropic) { compute_gradient_bottom_label_l2<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, label, margin_mul_1, center_coef_, sigma, ignore_zero); compute_gradient_weight_label_l2<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, label, margin_mul_1, center_coef_, sigma, sigma_diff, dist, update_sigma, ignore_zero); } else { compute_gradient_bottom_label_l2_diag<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, label, margin_mul_1, center_coef_, sigma, ignore_zero); compute_gradient_weight_label_l2_diag<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, label, margin_mul_1, center_coef_, sigma, sigma_diff, update_sigma, ignore_zero); } break; case ClassDistanceParameter_Metric_IP: compute_gradient_bottom_label_ip<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, label, margin_mul_1, center_coef_); compute_gradient_weight_label_ip<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, label, margin_mul_1, center_coef_); break; case ClassDistanceParameter_Metric_L1: compute_gradient_bottom_label_l1<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, label, margin_mul_1, center_coef_, ignore_zero); compute_gradient_weight_label_l1<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, label, margin_mul_1, center_coef_, ignore_zero); break; } } else { switch (param.metric()) { case ClassDistanceParameter_Metric_L2: compute_gradient_bottom_l2<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, ignore_zero); compute_gradient_weight_l2<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, ignore_zero); break; case ClassDistanceParameter_Metric_IP: compute_gradient_bottom_ip<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight); compute_gradient_weight_ip<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff); break; case ClassDistanceParameter_Metric_L1: compute_gradient_bottom_l1<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, ignore_zero); compute_gradient_weight_l1<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, ignore_zero); break; } } } INSTANTIATE_LAYER_GPU_FUNCS(ClassDistanceLayer); } // namespace caffe
a1d14987c349cdcfd1ed122f89188b9ccc4f9df0.cu
#include <vector> #include "caffe/filler.hpp" #include "caffe/layers/class_distance_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { ////////////////////////////////////////// template <typename Dtype> static __global__ void compute_top_l2(const int nthreads, const int N_, const int K_, const Dtype *bottom_data, Dtype *top_data, const Dtype *weight, const Dtype *sigma, Dtype *dist, bool ignore_zero, bool isotropic) { if (ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; const int j = index % N_; Dtype t = 0; for (int k = 0; k < K_; ++k) if (bottom_data[i*K_ + k]) { Dtype d = weight[j*K_ + k] - bottom_data[i*K_ + k]; t += d*d; } dist[index] = t; top_data[index] = Dtype(-0.5) * t / (max(sigma[j], Dtype(0)) + Dtype(0.0001)); } } else { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; const int j = index % N_; Dtype t = 0; for (int k = 0; k < K_; ++k) { Dtype d = weight[j*K_ + k] - bottom_data[i*K_ + k]; if (isotropic) t += d*d; else t += d*d/max(sigma[j*K_ + k], Dtype(0) + Dtype(0.00000001)); } dist[index] = t; // only useful for 'isotropic' if (isotropic) top_data[index] = Dtype(-0.5) * t / (max(sigma[j], Dtype(0)) + Dtype(0.0001)); else top_data[index] = Dtype(-0.5) * t; } } } template <typename Dtype> static __global__ void compute_top_ip(const int nthreads, const int N_, const int K_, const Dtype *bottom_data, Dtype *top_data, const Dtype *weight) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; const int j = index % N_; Dtype t = 0; for (int k = 0; k < K_; ++k) { t += weight[j*K_ + k] * bottom_data[i*K_ + k]; } top_data[index] = t; } } template <typename Dtype> static __global__ void compute_top_l1(const int nthreads, const int N_, const int K_, const Dtype *bottom_data, Dtype *top_data, const Dtype *weight, bool ignore_zero) { if (ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; const int j = index % N_; Dtype t = 0; for (int k = 0; k < K_; ++k) { Dtype d = weight[j*K_ + k] - bottom_data[i*K_ + k]; t += abs(d); } top_data[index] = -t; } } else { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; const int j = index % N_; Dtype t = 0; for (int k = 0; k < K_; ++k) { Dtype d = weight[j*K_ + k] - bottom_data[i*K_ + k]; t += abs(d); } top_data[index] = -t; } } } ////////////////////////////////////////// template <typename Dtype> static __global__ void margin_top(const int M_, const int N_, Dtype *top_data, const Dtype *label, const Dtype margin_mul, const Dtype margin_add) { CUDA_KERNEL_LOOP(i, M_) { const int y = (int)label[i]; top_data[i*N_ + y] += top_data[i*N_ + y] * margin_mul - margin_add; } } ////////////////////////////////////////// template <typename Dtype> void ClassDistanceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); const Dtype* sigma = this->blobs_[1]->gpu_data(); Dtype* dist = dist_.mutable_gpu_data(); const ClassDistanceParameter& param = this->layer_param_.class_distance_param(); bool isotropic = param.isotropic(); switch (param.metric()) { case ClassDistanceParameter_Metric_L2: compute_top_l2<Dtype> << <CAFFE_GET_BLOCKS(M_*N_), CAFFE_CUDA_NUM_THREADS >> >( M_*N_, N_, K_, bottom_data, top_data, weight, sigma, dist, param.ignore_zero() & (this->phase_ == TRAIN), isotropic); break; case ClassDistanceParameter_Metric_IP: compute_top_ip<Dtype> << 
<CAFFE_GET_BLOCKS(M_*N_), CAFFE_CUDA_NUM_THREADS >> >( M_*N_, N_, K_, bottom_data, top_data, weight); break; case ClassDistanceParameter_Metric_L1: compute_top_l1<Dtype> << <CAFFE_GET_BLOCKS(M_*N_), CAFFE_CUDA_NUM_THREADS >> >( M_*N_, N_, K_, bottom_data, top_data, weight, param.ignore_zero() & (this->phase_ == TRAIN)); break; } if (bottom.size() == 2 && this->phase_ == TRAIN) { Dtype margin_mul_ = this->m_mul_.get_iter("mul_margin"); Dtype margin_add_ = this->m_add_.get_iter("add_margin"); const Dtype* label = bottom[1]->gpu_data(); margin_top<Dtype> << <CAFFE_GET_BLOCKS(M_), CAFFE_CUDA_NUM_THREADS >> >( M_, N_, top_data, label, margin_mul_, margin_add_); } // validate that sigma > 0 const Dtype *sigma_cpu = this->blobs_[1]->cpu_data(); const int sigma_number = isotropic?N_:(N_*K_); for(int i=0; i<sigma_number; i++) if (sigma_cpu[i] <= eps_) { LOG(INFO) << "Dangerous sigma value, sigma[" << i << "]=" << sigma_cpu[i]; break; } /*if (bias_term_) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., bias_multiplier_.gpu_data(), this->blobs_[1]->gpu_data(), (Dtype)1., top_data); }*/ } //========================================== template <typename Dtype> static __global__ void compute_gradient_bottom_label_l2(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, const Dtype *label, const Dtype margin_mul, const Dtype center_coef, const Dtype *sigma, bool ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) if (!ignore_zero || bottom_data[index]) { if (j == (int)label[i]) t += (weight[j*K_ + k] - bottom_data[index]) * (margin_mul / (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * top_diff[i*N_ + j] - center_coef); else t += (weight[j*K_ + k] - bottom_data[index]) / (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * top_diff[i*N_ + j]; } bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_label_l2(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, const Dtype* label, const Dtype margin_mul, const Dtype center_coef, const Dtype *sigma, Dtype *sigma_diff, const Dtype *dist, bool update_sigma, bool ignore_zero) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; Dtype t_sigma = 0; for (int i = 0; i < M_; ++i) if (!ignore_zero || bottom_data[i*K_ + k]) { if (j == (int)label[i]){ t += (bottom_data[i*K_ + k] - weight[index]) * (margin_mul / (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * top_diff[i*N_ + j] - center_coef); if (update_sigma && k==0) t_sigma += dist[i * N_ + j] * margin_mul / (Dtype(2.0) * (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * sigma[j]) * top_diff[i*N_ + j]; } else{ t += (bottom_data[i*K_ + k] - weight[index]) / (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * top_diff[i*N_ + j]; if (update_sigma && k==0) t_sigma += dist[i * N_ + j] / (Dtype(2.0) * (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * sigma[j]) * top_diff[i*N_ + j]; } } weight_diff[index] += t; if (update_sigma && k == 0) sigma_diff[j] += t_sigma; } } template <typename Dtype> static __global__ void compute_gradient_bottom_label_l2_diag(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, const Dtype *label, const Dtype margin_mul, const Dtype center_coef, 
const Dtype *sigma, bool ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) if (!ignore_zero || bottom_data[index]) { if (j == (int)label[i]) t += (weight[j*K_ + k] - bottom_data[index]) * (margin_mul / (max(sigma[j*K_ + k] , Dtype(0)) + Dtype(0.00000001)) * top_diff[i*N_ + j] - center_coef); else t += (weight[j*K_ + k] - bottom_data[index]) / (max(sigma[j*K_ + k], Dtype(0)) + Dtype(0.00000001)) * top_diff[i*N_ + j]; } bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_label_l2_diag(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, const Dtype* label, const Dtype margin_mul, const Dtype center_coef, const Dtype *sigma, Dtype *sigma_diff, bool update_sigma, bool ignore_zero) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; Dtype t_sigma = 0; Dtype d = 0; Dtype safe_sigma = max(sigma[index], Dtype(0)) + Dtype(0.0001); for (int i = 0; i < M_; ++i) if (!ignore_zero || bottom_data[i*K_ + k]) { d = bottom_data[i*K_ + k] - weight[index]; if (j == (int)label[i]){ t += d * (margin_mul / safe_sigma * top_diff[i*N_ + j] - center_coef); if (update_sigma) t_sigma += d * d * margin_mul / (Dtype(2.0) * safe_sigma * safe_sigma) * top_diff[i*N_ + j]; } else{ t += d / safe_sigma * top_diff[i*N_ + j]; if (update_sigma) t_sigma += d * d / (Dtype(2.0) * safe_sigma * safe_sigma) * top_diff[i*N_ + j]; } } weight_diff[index] += t; if (update_sigma) sigma_diff[index] += t_sigma; } } template <typename Dtype> static __global__ void compute_gradient_bottom_label_ip(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, const Dtype *label, const Dtype margin_mul, const Dtype center_coef) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) { if (j == (int)label[i]) t += weight[j*K_ + k] * margin_mul * top_diff[i*N_ + j] + (bottom_data[index] - weight[j*K_ + k]) * center_coef; else t += weight[j*K_ + k] * top_diff[i*N_ + j]; } bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_label_ip(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, const Dtype* label, const Dtype margin_mul, const Dtype center_coef) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; for (int i = 0; i < M_; ++i) { if (j == (int)label[i]) t += margin_mul * bottom_data[i*K_ + k] * top_diff[i*N_ + j] + (weight[index] - bottom_data[i*K_ + k]) * center_coef; else t += bottom_data[i*K_ + k] * top_diff[i*N_ + j]; } weight_diff[index] += t; } } template <typename Dtype> static __global__ void compute_gradient_bottom_label_l1(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, const Dtype *label, const Dtype margin_mul, const Dtype center_coef, bool ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) if (!ignore_zero || bottom_data[index]) { if (j == (int)label[i]) t += ((weight[j*K_ + k] > 
bottom_data[index]) - (bottom_data[index] > weight[j*K_ + k])) * (margin_mul * top_diff[i*N_ + j] - center_coef); else t += ((weight[j*K_ + k] > bottom_data[index]) - (bottom_data[index] > weight[j*K_ + k])) * top_diff[i*N_ + j]; } bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_label_l1(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, const Dtype* label, const Dtype margin_mul, const Dtype center_coef, bool ignore_zero) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; for (int i = 0; i < M_; ++i) if (!ignore_zero || bottom_data[i*K_ + k]) { if (j == (int)label[i]) t += ((bottom_data[i*K_ + k] - weight[index]) - (weight[index] > bottom_data[i*K_ + k]) - (bottom_data[index] > weight[j*K_ + k])) * (margin_mul * top_diff[i*N_ + j] - center_coef); else t += ((bottom_data[i*K_ + k] - weight[index]) - (weight[index] > bottom_data[i*K_ + k]) - (bottom_data[index] > weight[j*K_ + k])) * top_diff[i*N_ + j]; } weight_diff[index] += t; } } template <typename Dtype> static __global__ void compute_gradient_bottom_l2(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, bool ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) if (!ignore_zero || bottom_data[index]) t += (weight[j*K_ + k] - bottom_data[index]) * top_diff[i*N_ + j]; bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_l2(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, bool ignore_zero) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; for (int i = 0; i < M_; ++i) if (!ignore_zero || bottom_data[i*K_ + k]) t += (bottom_data[i*K_ + k] - weight[index]) * top_diff[i*N_ + j]; weight_diff[index] += t; } } template <typename Dtype> static __global__ void compute_gradient_bottom_ip(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) t += weight[j*K_ + k] * top_diff[i*N_ + j]; bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_ip(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; for (int i = 0; i < M_; ++i) t += bottom_data[i*K_ + k] * top_diff[i*N_ + j]; weight_diff[index] += t; } } template <typename Dtype> static __global__ void compute_gradient_bottom_l1(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, bool ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) if (!ignore_zero || bottom_data[index]) t += ((weight[j*K_ + k] > bottom_data[index]) - (bottom_data[index] > 
weight[j*K_ + k])) * top_diff[i*N_ + j]; bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_l1(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, bool ignore_zero) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; for (int i = 0; i < M_; ++i) if (!ignore_zero || bottom_data[i*K_ + k]) t += ((bottom_data[i*K_ + k] - weight[index]) - (weight[index] > bottom_data[i*K_ + k]) - (bottom_data[index] > weight[j*K_ + k])) * top_diff[i*N_ + j]; weight_diff[index] += t; } } template <typename Dtype> void ClassDistanceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); /*const*/ Dtype* weight = this->blobs_[0]->mutable_gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); const Dtype* sigma = this->blobs_[1]->gpu_data(); const Dtype* dist = dist_.gpu_data(); Dtype* sigma_diff = this->blobs_[1]->mutable_gpu_diff(); const ClassDistanceParameter& param = this->layer_param_.class_distance_param(); bool ignore_zero = param.ignore_zero(); bool update_sigma = param.update_sigma(); bool isotropic = param.isotropic(); if (isotropic) caffe_gpu_set(N_, (Dtype)0, sigma_diff); else caffe_gpu_set(N_*K_, (Dtype)0, sigma_diff); if (bottom.size() == 2) { const Dtype* label = bottom[1]->gpu_data(); const Dtype center_coef_ = param.center_coef() / M_; const Dtype margin_mul_1 = 1 + (param.block_mul_grad() ? 0 : m_mul_.get()); switch (param.metric()) { case ClassDistanceParameter_Metric_L2: if (isotropic) { compute_gradient_bottom_label_l2<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, label, margin_mul_1, center_coef_, sigma, ignore_zero); compute_gradient_weight_label_l2<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, label, margin_mul_1, center_coef_, sigma, sigma_diff, dist, update_sigma, ignore_zero); } else { compute_gradient_bottom_label_l2_diag<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, label, margin_mul_1, center_coef_, sigma, ignore_zero); compute_gradient_weight_label_l2_diag<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, label, margin_mul_1, center_coef_, sigma, sigma_diff, update_sigma, ignore_zero); } break; case ClassDistanceParameter_Metric_IP: compute_gradient_bottom_label_ip<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, label, margin_mul_1, center_coef_); compute_gradient_weight_label_ip<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, label, margin_mul_1, center_coef_); break; case ClassDistanceParameter_Metric_L1: compute_gradient_bottom_label_l1<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, label, margin_mul_1, center_coef_, ignore_zero); compute_gradient_weight_label_l1<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), 
CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, label, margin_mul_1, center_coef_, ignore_zero); break; } } else { switch (param.metric()) { case ClassDistanceParameter_Metric_L2: compute_gradient_bottom_l2<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, ignore_zero); compute_gradient_weight_l2<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, ignore_zero); break; case ClassDistanceParameter_Metric_IP: compute_gradient_bottom_ip<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight); compute_gradient_weight_ip<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff); break; case ClassDistanceParameter_Metric_L1: compute_gradient_bottom_l1<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, ignore_zero); compute_gradient_weight_l1<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, ignore_zero); break; } } } INSTANTIATE_LAYER_GPU_FUNCS(ClassDistanceLayer); } // namespace caffe
5b671824782b027c39af27e73502a7a258baf36e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #undef _GLIBCXX_USE_INT128 #include "cuda_sort2.h" #include "particles_cuda.h" #include "psc_bnd_cuda.h" #define PFX(x) xchg_##x #include "constants.c" #if 0 // FIXME const mem for dims? // FIXME probably should do our own loop rather than use blockIdx __global__ static void exchange_particles(int n_part, particles_cuda_dev_t h_dev, int ldimsx, int ldimsy, int ldimsz) { int ldims[3] = { ldimsx, ldimsy, ldimsz }; int xm[3]; for (int d = 0; d < 3; d++) { xm[d] = ldims[d] / d_consts.dxi[d]; } int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; if (i < n_part) { particle_cuda_real_t xi[3] = { h_dev.xi4[i].x * d_consts.dxi[0], h_dev.xi4[i].y * d_consts.dxi[1], h_dev.xi4[i].z * d_consts.dxi[2] }; int pos[3]; for (int d = 0; d < 3; d++) { pos[d] = __float2int_rd(xi[d]); } if (pos[1] < 0) { h_dev.xi4[i].y += xm[1]; if (h_dev.xi4[i].y >= xm[1]) h_dev.xi4[i].y = 0.f; } if (pos[2] < 0) { h_dev.xi4[i].z += xm[2]; if (h_dev.xi4[i].z >= xm[2]) h_dev.xi4[i].z = 0.f; } if (pos[1] >= ldims[1]) { h_dev.xi4[i].y -= xm[1]; } if (pos[2] >= ldims[2]) { h_dev.xi4[i].z -= xm[2]; } } } EXTERN_C void cuda_exchange_particles(int p, struct psc_particles *prts) { struct psc_particles_cuda *cuda = psc_particles_cuda(prts); struct psc_patch *patch = &ppsc->patch[p]; xchg_set_constants(prts, NULL); int dimBlock[2] = { THREADS_PER_BLOCK, 1 }; int dimGrid[2] = { (prts->n_part + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, 1 }; RUN_KERNEL(dimGrid, dimBlock, exchange_particles, (prts->n_part, *cuda->h_dev, patch->ldims[0], patch->ldims[1], patch->ldims[2])); } #endif // ---------------------------------------------------------------------- // cuda_mprts_find_block_indices_2_total // // like cuda_find_block_indices, but handles out-of-bound // particles __global__ static void mprts_find_block_indices_2_total(struct cuda_params prm, float4 *d_xi4, unsigned int *d_off, unsigned int *d_bidx, int nr_patches) { int tid = threadIdx.x; int block_pos[3]; block_pos[1] = blockIdx.x; block_pos[2] = blockIdx.y % prm.b_mx[2]; int bid = block_pos_to_block_idx(block_pos, prm.b_mx); int p = blockIdx.y / prm.b_mx[2]; int nr_blocks = prm.b_mx[1] * prm.b_mx[2]; // FIXME/OPT, could be done better like reorder_send_buf int block_begin = d_off[bid + p * nr_blocks]; int block_end = d_off[bid + p * nr_blocks + 1]; for (int n = block_begin + tid; n < block_end; n += THREADS_PER_BLOCK) { float4 xi4 = d_xi4[n]; unsigned int block_pos_y = __float2int_rd(xi4.y * prm.b_dxi[1]); unsigned int block_pos_z = __float2int_rd(xi4.z * prm.b_dxi[2]); int block_idx; if (block_pos_y >= prm.b_mx[1] || block_pos_z >= prm.b_mx[2]) { block_idx = nr_blocks * nr_patches; } else { block_idx = block_pos_z * prm.b_mx[1] + block_pos_y + p * nr_blocks; } d_bidx[n] = block_idx; } } EXTERN_C void cuda_mprts_find_block_indices_2_total(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); if (mprts->nr_patches == 0) { return; } struct cuda_params prm; set_params(&prm, ppsc, mprts, NULL); int dimBlock[2] = { THREADS_PER_BLOCK, 1 }; int dimGrid[2] = { prm.b_mx[1], prm.b_mx[2] * mprts->nr_patches }; RUN_KERNEL(dimGrid, dimBlock, mprts_find_block_indices_2_total, (prm, mprts_cuda->d_xi4, mprts_cuda->d_off, mprts_cuda->d_bidx, mprts->nr_patches)); free_params(&prm); } // ---------------------------------------------------------------------- // cuda_mprts_find_block_keys __global__ static void mprts_find_block_keys(struct cuda_params prm, float4 *d_xi4, 
unsigned int *d_off, unsigned int *d_bidx, int nr_total_blocks) { int tid = threadIdx.x; int bid = blockIdx.x; int nr_blocks = prm.b_mx[1] * prm.b_mx[2]; int p = bid / nr_blocks; int block_begin = d_off[bid]; int block_end = d_off[bid + 1]; for (int n = block_begin + tid; n < block_end; n += THREADS_PER_BLOCK) { float4 xi4 = d_xi4[n]; unsigned int block_pos_y = __float2int_rd(xi4.y * prm.b_dxi[1]); unsigned int block_pos_z = __float2int_rd(xi4.z * prm.b_dxi[2]); int block_idx; if (block_pos_y >= prm.b_mx[1] || block_pos_z >= prm.b_mx[2]) { block_idx = CUDA_BND_S_OOB; } else { int bidx = block_pos_z * prm.b_mx[1] + block_pos_y + p * nr_blocks; int b_diff = bid - bidx + prm.b_mx[1] + 1; int d1 = b_diff % prm.b_mx[1]; int d2 = b_diff / prm.b_mx[1]; block_idx = d2 * 3 + d1; } d_bidx[n] = block_idx; } } EXTERN_C void cuda_mprts_find_block_keys(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); if (mprts->nr_patches == 0) { return; } struct cuda_params prm; set_params(&prm, ppsc, mprts, NULL); int dimBlock[2] = { THREADS_PER_BLOCK, 1 }; int dimGrid[2] = { mprts_cuda->nr_total_blocks, 1 }; RUN_KERNEL(dimGrid, dimBlock, mprts_find_block_keys, (prm, mprts_cuda->d_xi4, mprts_cuda->d_off, mprts_cuda->d_bidx, mprts_cuda->nr_total_blocks)); free_params(&prm); } // ---------------------------------------------------------------------- // cuda_mprts_find_block_indices_ids_total __global__ static void mprts_find_block_indices_ids_total(struct cuda_params prm, float4 *d_xi4, particles_cuda_dev_t *d_cp_prts, unsigned int *d_bidx, unsigned int *d_ids, int nr_patches) { int n = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; int nr_blocks = prm.b_mx[1] * prm.b_mx[2]; unsigned int off = 0; for (int p = 0; p < nr_patches; p++) { if (n < d_cp_prts[p].n_part) { float4 xi4 = d_xi4[n + off]; unsigned int block_pos_y = __float2int_rd(xi4.y * prm.b_dxi[1]); unsigned int block_pos_z = __float2int_rd(xi4.z * prm.b_dxi[2]); int block_idx; if (block_pos_y >= prm.b_mx[1] || block_pos_z >= prm.b_mx[2]) { block_idx = -1; // not supposed to happen here! 
} else { block_idx = block_pos_z * prm.b_mx[1] + block_pos_y + p * nr_blocks; } d_bidx[n + off] = block_idx; d_ids[n + off] = n + off; } off += d_cp_prts[p].n_part; } } EXTERN_C void cuda_mprts_find_block_indices_ids_total(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); if (mprts->nr_patches == 0) { return; } int max_n_part = 0; int nr_prts = 0; mprts_cuda->nr_prts_send = 0; for (int p = 0; p < mprts->nr_patches; p++) { struct psc_particles *prts = psc_mparticles_get_patch(mprts, p); struct psc_particles_cuda *cuda = psc_particles_cuda(prts); mprts_cuda->nr_prts_send += cuda->bnd_n_send; if (prts->n_part > max_n_part) { max_n_part = prts->n_part; } cuda->h_dev->n_part = prts->n_part; nr_prts += prts->n_part; } mprts_cuda->nr_prts = nr_prts; psc_mparticles_cuda_copy_to_dev(mprts); struct cuda_params prm; set_params(&prm, ppsc, mprts, NULL); int dimBlock[2] = { THREADS_PER_BLOCK, 1 }; int dimGrid[2] = { (max_n_part + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, 1 }; RUN_KERNEL(dimGrid, dimBlock, mprts_find_block_indices_ids_total, (prm, mprts_cuda->d_xi4, psc_mparticles_cuda(mprts)->d_dev, mprts_cuda->d_bidx, mprts_cuda->d_ids, mprts->nr_patches)); free_params(&prm); } // ====================================================================== // cuda_mprts_find_block_indices_3 EXTERN_C void cuda_mprts_find_block_indices_3(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); unsigned int nr_recv = mprts_cuda->nr_prts_recv; unsigned int nr_prts_prev = mprts_cuda->nr_prts - nr_recv; // for consistency, use same block indices that we counted earlier // OPT unneeded? check(hipMemcpy(mprts_cuda->d_bidx + nr_prts_prev, mprts_cuda->h_bnd_idx, nr_recv * sizeof(*mprts_cuda->d_bidx), hipMemcpyHostToDevice)); // slight abuse of the now unused last part of spine_cnts check(hipMemcpy(mprts_cuda->d_bnd_spine_cnts + 10 * mprts_cuda->nr_total_blocks, mprts_cuda->h_bnd_cnt, mprts_cuda->nr_total_blocks * sizeof(*mprts_cuda->d_bnd_spine_cnts), hipMemcpyHostToDevice)); check(hipMemcpy(mprts_cuda->d_alt_bidx + nr_prts_prev, mprts_cuda->h_bnd_off, nr_recv * sizeof(*mprts_cuda->d_alt_bidx), hipMemcpyHostToDevice)); free(mprts_cuda->h_bnd_idx); free(mprts_cuda->h_bnd_off); } // ====================================================================== // mprts_reorder_send_buf_total __global__ static void mprts_reorder_send_buf_total(int nr_prts, int nr_total_blocks, unsigned int *d_bidx, unsigned int *d_sums, float4 *d_xi4, float4 *d_pxi4, float4 *d_xchg_xi4, float4 *d_xchg_pxi4) { int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; if (i >= nr_prts) return; if (d_bidx[i] == CUDA_BND_S_OOB) { int j = d_sums[i]; d_xchg_xi4[j] = d_xi4[i]; d_xchg_pxi4[j] = d_pxi4[i]; } } EXTERN_C void cuda_mprts_reorder_send_buf_total(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); if (mprts->nr_patches == 0) return; float4 *xchg_xi4 = mprts_cuda->d_xi4 + mprts_cuda->nr_prts; float4 *xchg_pxi4 = mprts_cuda->d_pxi4 + mprts_cuda->nr_prts; assert(mprts_cuda->nr_prts + mprts_cuda->nr_prts_send < mprts_cuda->nr_alloced); int dimBlock[2] = { THREADS_PER_BLOCK, 1 }; int dimGrid[2] = { (mprts_cuda->nr_prts + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, 1 }; RUN_KERNEL(dimGrid, dimBlock, mprts_reorder_send_buf_total, (mprts_cuda->nr_prts, mprts_cuda->nr_total_blocks, mprts_cuda->d_bidx, mprts_cuda->d_sums, mprts_cuda->d_xi4, mprts_cuda->d_pxi4, xchg_xi4, xchg_pxi4)); } // 
====================================================================== // psc_mparticles_cuda_swap_alt static void psc_mparticles_cuda_swap_alt(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); float4 *tmp_xi4 = mprts_cuda->d_alt_xi4; float4 *tmp_pxi4 = mprts_cuda->d_alt_pxi4; mprts_cuda->d_alt_xi4 = mprts_cuda->d_xi4; mprts_cuda->d_alt_pxi4 = mprts_cuda->d_pxi4; mprts_cuda->d_xi4 = tmp_xi4; mprts_cuda->d_pxi4 = tmp_pxi4; } // ====================================================================== // cuda_mprts_reorder __global__ static void mprts_reorder(int nr_prts, unsigned int *d_ids, float4 *xi4, float4 *pxi4, float4 *alt_xi4, float4 *alt_pxi4) { int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; if (i < nr_prts) { int j = d_ids[i]; alt_xi4[i] = xi4[j]; alt_pxi4[i] = pxi4[j]; } } void cuda_mprts_reorder(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); int dimBlock[2] = { THREADS_PER_BLOCK, 1 }; int dimGrid[2] = { (mprts_cuda->nr_prts + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, 1 }; RUN_KERNEL(dimGrid, dimBlock, mprts_reorder, (mprts_cuda->nr_prts, mprts_cuda->d_ids, mprts_cuda->d_xi4, mprts_cuda->d_pxi4, mprts_cuda->d_alt_xi4, mprts_cuda->d_alt_pxi4)); psc_mparticles_cuda_swap_alt(mprts); } // ====================================================================== // reorder_and_offsets __global__ static void mprts_reorder_and_offsets(int nr_prts, float4 *xi4, float4 *pxi4, float4 *alt_xi4, float4 *alt_pxi4, unsigned int *d_bidx, unsigned int *d_ids, unsigned int *d_off, int last_block) { int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; if (i > nr_prts) return; int block, prev_block; if (i < nr_prts) { alt_xi4[i] = xi4[d_ids[i]]; alt_pxi4[i] = pxi4[d_ids[i]]; block = d_bidx[i]; } else { // needed if there is no particle in the last block block = last_block; } // OPT: d_bidx[i-1] could use shmem // create offsets per block into particle array prev_block = -1; if (i > 0) { prev_block = d_bidx[i-1]; } for (int b = prev_block + 1; b <= block; b++) { d_off[b] = i; } } void cuda_mprts_reorder_and_offsets(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); if (mprts->nr_patches == 0) { return; } int nr_blocks = psc_particles_cuda(psc_mparticles_get_patch(mprts, 0))->nr_blocks; int dimBlock[2] = { THREADS_PER_BLOCK, 1 }; int dimGrid[2] = { (mprts_cuda->nr_prts + 1 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, 1 }; RUN_KERNEL(dimGrid, dimBlock, mprts_reorder_and_offsets, (mprts_cuda->nr_prts, mprts_cuda->d_xi4, mprts_cuda->d_pxi4, mprts_cuda->d_alt_xi4, mprts_cuda->d_alt_pxi4, mprts_cuda->d_bidx, mprts_cuda->d_ids, mprts_cuda->d_off, mprts->nr_patches * nr_blocks)); psc_mparticles_cuda_swap_alt(mprts); psc_mparticles_cuda_copy_to_dev(mprts); } // ====================================================================== // cuda_mprts_copy_from_dev void cuda_mprts_copy_from_dev(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); if (mprts->nr_patches == 0) { return; } mprts_cuda->h_bnd_xi4 = new float4[mprts_cuda->nr_prts_send]; mprts_cuda->h_bnd_pxi4 = new float4[mprts_cuda->nr_prts_send]; check(hipMemcpy(mprts_cuda->h_bnd_xi4, mprts_cuda->d_xi4 + mprts_cuda->nr_prts, mprts_cuda->nr_prts_send * sizeof(float4), hipMemcpyDeviceToHost)); check(hipMemcpy(mprts_cuda->h_bnd_pxi4, mprts_cuda->d_pxi4 + mprts_cuda->nr_prts, mprts_cuda->nr_prts_send * sizeof(float4), hipMemcpyDeviceToHost)); } 
//====================================================================== // cuda_mprts_convert_from_cuda void cuda_mprts_convert_from_cuda(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); if (mprts->nr_patches == 0) { return; } float4 *bnd_xi4 = mprts_cuda->h_bnd_xi4; float4 *bnd_pxi4 = mprts_cuda->h_bnd_pxi4; for (int p = 0; p < mprts->nr_patches; p++) { struct psc_particles *prts = psc_mparticles_get_patch(mprts, p); struct psc_particles_cuda *cuda = psc_particles_cuda(prts); cuda->bnd_prts = new particle_single_t[cuda->bnd_n_send]; for (int n = 0; n < cuda->bnd_n_send; n++) { particle_single_t *prt = &cuda->bnd_prts[n]; prt->xi = bnd_xi4[n].x; prt->yi = bnd_xi4[n].y; prt->zi = bnd_xi4[n].z; prt->kind = cuda_float_as_int(bnd_xi4[n].w); prt->pxi = bnd_pxi4[n].x; prt->pyi = bnd_pxi4[n].y; prt->pzi = bnd_pxi4[n].z; prt->qni_wni = bnd_pxi4[n].w; } bnd_xi4 += cuda->bnd_n_send; bnd_pxi4 += cuda->bnd_n_send; } delete[] mprts_cuda->h_bnd_xi4; delete[] mprts_cuda->h_bnd_pxi4; } // ====================================================================== // cuda_mprts_copy_to_dev void cuda_mprts_copy_to_dev(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); float4 *d_xi4 = mprts_cuda->d_xi4; float4 *d_pxi4 = mprts_cuda->d_pxi4; unsigned int nr_recv = 0; for (int p = 0; p < mprts->nr_patches; p++) { struct psc_particles *prts = psc_mparticles_get_patch(mprts, p); struct psc_particles_cuda *cuda = psc_particles_cuda(prts); nr_recv += cuda->bnd_n_recv; } assert(mprts_cuda->nr_prts + nr_recv <= mprts_cuda->nr_alloced); check(hipMemcpy(d_xi4 + mprts_cuda->nr_prts, mprts_cuda->h_bnd_xi4, nr_recv * sizeof(*d_xi4), hipMemcpyHostToDevice)); check(hipMemcpy(d_pxi4 + mprts_cuda->nr_prts, mprts_cuda->h_bnd_pxi4, nr_recv * sizeof(*d_pxi4), hipMemcpyHostToDevice)); free(mprts_cuda->h_bnd_xi4); free(mprts_cuda->h_bnd_pxi4); mprts_cuda->nr_prts_recv = nr_recv; mprts_cuda->nr_prts += nr_recv; } // ====================================================================== // cuda_mprts_sort void cuda_mprts_sort(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); cuda_mprts_sort_pairs_device(mprts); for (int p = 0; p < mprts->nr_patches; p++) { struct psc_particles *prts = psc_mparticles_get_patch(mprts, p); struct psc_particles_cuda *cuda = psc_particles_cuda(prts); prts->n_part += cuda->bnd_n_recv - cuda->bnd_n_send; cuda->h_dev->n_part = prts->n_part; } mprts_cuda->nr_prts -= mprts_cuda->nr_prts_send; psc_mparticles_cuda_copy_to_dev(mprts); } // ====================================================================== // cuda_mprts_check_ordered_total void cuda_mprts_check_ordered_total(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); cuda_mprts_find_block_indices_2_total(mprts); unsigned int last = 0; unsigned int off = 0; for (int p = 0; p < mprts->nr_patches; p++) { struct psc_particles *prts = psc_mparticles_get_patch(mprts, p); struct psc_particles_cuda *cuda = psc_particles_cuda(prts); unsigned int *bidx = new unsigned int[prts->n_part]; cuda_copy_bidx_from_dev(prts, bidx, mprts_cuda->d_bidx + off); for (int n = 0; n < prts->n_part; n++) { if (!(bidx[n] >= last && bidx[n] < mprts->nr_patches * cuda->nr_blocks)) { mprintf("p = %d, n = %d bidx = %d last = %d\n", p, n, bidx[n], last); assert(0); } last = bidx[n]; } delete[] bidx; off += prts->n_part; } }
5b671824782b027c39af27e73502a7a258baf36e.cu
#undef _GLIBCXX_USE_INT128 #include "cuda_sort2.h" #include "particles_cuda.h" #include "psc_bnd_cuda.h" #define PFX(x) xchg_##x #include "constants.c" #if 0 // FIXME const mem for dims? // FIXME probably should do our own loop rather than use blockIdx __global__ static void exchange_particles(int n_part, particles_cuda_dev_t h_dev, int ldimsx, int ldimsy, int ldimsz) { int ldims[3] = { ldimsx, ldimsy, ldimsz }; int xm[3]; for (int d = 0; d < 3; d++) { xm[d] = ldims[d] / d_consts.dxi[d]; } int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; if (i < n_part) { particle_cuda_real_t xi[3] = { h_dev.xi4[i].x * d_consts.dxi[0], h_dev.xi4[i].y * d_consts.dxi[1], h_dev.xi4[i].z * d_consts.dxi[2] }; int pos[3]; for (int d = 0; d < 3; d++) { pos[d] = __float2int_rd(xi[d]); } if (pos[1] < 0) { h_dev.xi4[i].y += xm[1]; if (h_dev.xi4[i].y >= xm[1]) h_dev.xi4[i].y = 0.f; } if (pos[2] < 0) { h_dev.xi4[i].z += xm[2]; if (h_dev.xi4[i].z >= xm[2]) h_dev.xi4[i].z = 0.f; } if (pos[1] >= ldims[1]) { h_dev.xi4[i].y -= xm[1]; } if (pos[2] >= ldims[2]) { h_dev.xi4[i].z -= xm[2]; } } } EXTERN_C void cuda_exchange_particles(int p, struct psc_particles *prts) { struct psc_particles_cuda *cuda = psc_particles_cuda(prts); struct psc_patch *patch = &ppsc->patch[p]; xchg_set_constants(prts, NULL); int dimBlock[2] = { THREADS_PER_BLOCK, 1 }; int dimGrid[2] = { (prts->n_part + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, 1 }; RUN_KERNEL(dimGrid, dimBlock, exchange_particles, (prts->n_part, *cuda->h_dev, patch->ldims[0], patch->ldims[1], patch->ldims[2])); } #endif // ---------------------------------------------------------------------- // cuda_mprts_find_block_indices_2_total // // like cuda_find_block_indices, but handles out-of-bound // particles __global__ static void mprts_find_block_indices_2_total(struct cuda_params prm, float4 *d_xi4, unsigned int *d_off, unsigned int *d_bidx, int nr_patches) { int tid = threadIdx.x; int block_pos[3]; block_pos[1] = blockIdx.x; block_pos[2] = blockIdx.y % prm.b_mx[2]; int bid = block_pos_to_block_idx(block_pos, prm.b_mx); int p = blockIdx.y / prm.b_mx[2]; int nr_blocks = prm.b_mx[1] * prm.b_mx[2]; // FIXME/OPT, could be done better like reorder_send_buf int block_begin = d_off[bid + p * nr_blocks]; int block_end = d_off[bid + p * nr_blocks + 1]; for (int n = block_begin + tid; n < block_end; n += THREADS_PER_BLOCK) { float4 xi4 = d_xi4[n]; unsigned int block_pos_y = __float2int_rd(xi4.y * prm.b_dxi[1]); unsigned int block_pos_z = __float2int_rd(xi4.z * prm.b_dxi[2]); int block_idx; if (block_pos_y >= prm.b_mx[1] || block_pos_z >= prm.b_mx[2]) { block_idx = nr_blocks * nr_patches; } else { block_idx = block_pos_z * prm.b_mx[1] + block_pos_y + p * nr_blocks; } d_bidx[n] = block_idx; } } EXTERN_C void cuda_mprts_find_block_indices_2_total(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); if (mprts->nr_patches == 0) { return; } struct cuda_params prm; set_params(&prm, ppsc, mprts, NULL); int dimBlock[2] = { THREADS_PER_BLOCK, 1 }; int dimGrid[2] = { prm.b_mx[1], prm.b_mx[2] * mprts->nr_patches }; RUN_KERNEL(dimGrid, dimBlock, mprts_find_block_indices_2_total, (prm, mprts_cuda->d_xi4, mprts_cuda->d_off, mprts_cuda->d_bidx, mprts->nr_patches)); free_params(&prm); } // ---------------------------------------------------------------------- // cuda_mprts_find_block_keys __global__ static void mprts_find_block_keys(struct cuda_params prm, float4 *d_xi4, unsigned int *d_off, unsigned int *d_bidx, int nr_total_blocks) { int tid = 
threadIdx.x; int bid = blockIdx.x; int nr_blocks = prm.b_mx[1] * prm.b_mx[2]; int p = bid / nr_blocks; int block_begin = d_off[bid]; int block_end = d_off[bid + 1]; for (int n = block_begin + tid; n < block_end; n += THREADS_PER_BLOCK) { float4 xi4 = d_xi4[n]; unsigned int block_pos_y = __float2int_rd(xi4.y * prm.b_dxi[1]); unsigned int block_pos_z = __float2int_rd(xi4.z * prm.b_dxi[2]); int block_idx; if (block_pos_y >= prm.b_mx[1] || block_pos_z >= prm.b_mx[2]) { block_idx = CUDA_BND_S_OOB; } else { int bidx = block_pos_z * prm.b_mx[1] + block_pos_y + p * nr_blocks; int b_diff = bid - bidx + prm.b_mx[1] + 1; int d1 = b_diff % prm.b_mx[1]; int d2 = b_diff / prm.b_mx[1]; block_idx = d2 * 3 + d1; } d_bidx[n] = block_idx; } } EXTERN_C void cuda_mprts_find_block_keys(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); if (mprts->nr_patches == 0) { return; } struct cuda_params prm; set_params(&prm, ppsc, mprts, NULL); int dimBlock[2] = { THREADS_PER_BLOCK, 1 }; int dimGrid[2] = { mprts_cuda->nr_total_blocks, 1 }; RUN_KERNEL(dimGrid, dimBlock, mprts_find_block_keys, (prm, mprts_cuda->d_xi4, mprts_cuda->d_off, mprts_cuda->d_bidx, mprts_cuda->nr_total_blocks)); free_params(&prm); } // ---------------------------------------------------------------------- // cuda_mprts_find_block_indices_ids_total __global__ static void mprts_find_block_indices_ids_total(struct cuda_params prm, float4 *d_xi4, particles_cuda_dev_t *d_cp_prts, unsigned int *d_bidx, unsigned int *d_ids, int nr_patches) { int n = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; int nr_blocks = prm.b_mx[1] * prm.b_mx[2]; unsigned int off = 0; for (int p = 0; p < nr_patches; p++) { if (n < d_cp_prts[p].n_part) { float4 xi4 = d_xi4[n + off]; unsigned int block_pos_y = __float2int_rd(xi4.y * prm.b_dxi[1]); unsigned int block_pos_z = __float2int_rd(xi4.z * prm.b_dxi[2]); int block_idx; if (block_pos_y >= prm.b_mx[1] || block_pos_z >= prm.b_mx[2]) { block_idx = -1; // not supposed to happen here! 
} else { block_idx = block_pos_z * prm.b_mx[1] + block_pos_y + p * nr_blocks; } d_bidx[n + off] = block_idx; d_ids[n + off] = n + off; } off += d_cp_prts[p].n_part; } } EXTERN_C void cuda_mprts_find_block_indices_ids_total(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); if (mprts->nr_patches == 0) { return; } int max_n_part = 0; int nr_prts = 0; mprts_cuda->nr_prts_send = 0; for (int p = 0; p < mprts->nr_patches; p++) { struct psc_particles *prts = psc_mparticles_get_patch(mprts, p); struct psc_particles_cuda *cuda = psc_particles_cuda(prts); mprts_cuda->nr_prts_send += cuda->bnd_n_send; if (prts->n_part > max_n_part) { max_n_part = prts->n_part; } cuda->h_dev->n_part = prts->n_part; nr_prts += prts->n_part; } mprts_cuda->nr_prts = nr_prts; psc_mparticles_cuda_copy_to_dev(mprts); struct cuda_params prm; set_params(&prm, ppsc, mprts, NULL); int dimBlock[2] = { THREADS_PER_BLOCK, 1 }; int dimGrid[2] = { (max_n_part + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, 1 }; RUN_KERNEL(dimGrid, dimBlock, mprts_find_block_indices_ids_total, (prm, mprts_cuda->d_xi4, psc_mparticles_cuda(mprts)->d_dev, mprts_cuda->d_bidx, mprts_cuda->d_ids, mprts->nr_patches)); free_params(&prm); } // ====================================================================== // cuda_mprts_find_block_indices_3 EXTERN_C void cuda_mprts_find_block_indices_3(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); unsigned int nr_recv = mprts_cuda->nr_prts_recv; unsigned int nr_prts_prev = mprts_cuda->nr_prts - nr_recv; // for consistency, use same block indices that we counted earlier // OPT unneeded? check(cudaMemcpy(mprts_cuda->d_bidx + nr_prts_prev, mprts_cuda->h_bnd_idx, nr_recv * sizeof(*mprts_cuda->d_bidx), cudaMemcpyHostToDevice)); // slight abuse of the now unused last part of spine_cnts check(cudaMemcpy(mprts_cuda->d_bnd_spine_cnts + 10 * mprts_cuda->nr_total_blocks, mprts_cuda->h_bnd_cnt, mprts_cuda->nr_total_blocks * sizeof(*mprts_cuda->d_bnd_spine_cnts), cudaMemcpyHostToDevice)); check(cudaMemcpy(mprts_cuda->d_alt_bidx + nr_prts_prev, mprts_cuda->h_bnd_off, nr_recv * sizeof(*mprts_cuda->d_alt_bidx), cudaMemcpyHostToDevice)); free(mprts_cuda->h_bnd_idx); free(mprts_cuda->h_bnd_off); } // ====================================================================== // mprts_reorder_send_buf_total __global__ static void mprts_reorder_send_buf_total(int nr_prts, int nr_total_blocks, unsigned int *d_bidx, unsigned int *d_sums, float4 *d_xi4, float4 *d_pxi4, float4 *d_xchg_xi4, float4 *d_xchg_pxi4) { int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; if (i >= nr_prts) return; if (d_bidx[i] == CUDA_BND_S_OOB) { int j = d_sums[i]; d_xchg_xi4[j] = d_xi4[i]; d_xchg_pxi4[j] = d_pxi4[i]; } } EXTERN_C void cuda_mprts_reorder_send_buf_total(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); if (mprts->nr_patches == 0) return; float4 *xchg_xi4 = mprts_cuda->d_xi4 + mprts_cuda->nr_prts; float4 *xchg_pxi4 = mprts_cuda->d_pxi4 + mprts_cuda->nr_prts; assert(mprts_cuda->nr_prts + mprts_cuda->nr_prts_send < mprts_cuda->nr_alloced); int dimBlock[2] = { THREADS_PER_BLOCK, 1 }; int dimGrid[2] = { (mprts_cuda->nr_prts + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, 1 }; RUN_KERNEL(dimGrid, dimBlock, mprts_reorder_send_buf_total, (mprts_cuda->nr_prts, mprts_cuda->nr_total_blocks, mprts_cuda->d_bidx, mprts_cuda->d_sums, mprts_cuda->d_xi4, mprts_cuda->d_pxi4, xchg_xi4, xchg_pxi4)); } // 
====================================================================== // psc_mparticles_cuda_swap_alt static void psc_mparticles_cuda_swap_alt(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); float4 *tmp_xi4 = mprts_cuda->d_alt_xi4; float4 *tmp_pxi4 = mprts_cuda->d_alt_pxi4; mprts_cuda->d_alt_xi4 = mprts_cuda->d_xi4; mprts_cuda->d_alt_pxi4 = mprts_cuda->d_pxi4; mprts_cuda->d_xi4 = tmp_xi4; mprts_cuda->d_pxi4 = tmp_pxi4; } // ====================================================================== // cuda_mprts_reorder __global__ static void mprts_reorder(int nr_prts, unsigned int *d_ids, float4 *xi4, float4 *pxi4, float4 *alt_xi4, float4 *alt_pxi4) { int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; if (i < nr_prts) { int j = d_ids[i]; alt_xi4[i] = xi4[j]; alt_pxi4[i] = pxi4[j]; } } void cuda_mprts_reorder(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); int dimBlock[2] = { THREADS_PER_BLOCK, 1 }; int dimGrid[2] = { (mprts_cuda->nr_prts + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, 1 }; RUN_KERNEL(dimGrid, dimBlock, mprts_reorder, (mprts_cuda->nr_prts, mprts_cuda->d_ids, mprts_cuda->d_xi4, mprts_cuda->d_pxi4, mprts_cuda->d_alt_xi4, mprts_cuda->d_alt_pxi4)); psc_mparticles_cuda_swap_alt(mprts); } // ====================================================================== // reorder_and_offsets __global__ static void mprts_reorder_and_offsets(int nr_prts, float4 *xi4, float4 *pxi4, float4 *alt_xi4, float4 *alt_pxi4, unsigned int *d_bidx, unsigned int *d_ids, unsigned int *d_off, int last_block) { int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; if (i > nr_prts) return; int block, prev_block; if (i < nr_prts) { alt_xi4[i] = xi4[d_ids[i]]; alt_pxi4[i] = pxi4[d_ids[i]]; block = d_bidx[i]; } else { // needed if there is no particle in the last block block = last_block; } // OPT: d_bidx[i-1] could use shmem // create offsets per block into particle array prev_block = -1; if (i > 0) { prev_block = d_bidx[i-1]; } for (int b = prev_block + 1; b <= block; b++) { d_off[b] = i; } } void cuda_mprts_reorder_and_offsets(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); if (mprts->nr_patches == 0) { return; } int nr_blocks = psc_particles_cuda(psc_mparticles_get_patch(mprts, 0))->nr_blocks; int dimBlock[2] = { THREADS_PER_BLOCK, 1 }; int dimGrid[2] = { (mprts_cuda->nr_prts + 1 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, 1 }; RUN_KERNEL(dimGrid, dimBlock, mprts_reorder_and_offsets, (mprts_cuda->nr_prts, mprts_cuda->d_xi4, mprts_cuda->d_pxi4, mprts_cuda->d_alt_xi4, mprts_cuda->d_alt_pxi4, mprts_cuda->d_bidx, mprts_cuda->d_ids, mprts_cuda->d_off, mprts->nr_patches * nr_blocks)); psc_mparticles_cuda_swap_alt(mprts); psc_mparticles_cuda_copy_to_dev(mprts); } // ====================================================================== // cuda_mprts_copy_from_dev void cuda_mprts_copy_from_dev(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); if (mprts->nr_patches == 0) { return; } mprts_cuda->h_bnd_xi4 = new float4[mprts_cuda->nr_prts_send]; mprts_cuda->h_bnd_pxi4 = new float4[mprts_cuda->nr_prts_send]; check(cudaMemcpy(mprts_cuda->h_bnd_xi4, mprts_cuda->d_xi4 + mprts_cuda->nr_prts, mprts_cuda->nr_prts_send * sizeof(float4), cudaMemcpyDeviceToHost)); check(cudaMemcpy(mprts_cuda->h_bnd_pxi4, mprts_cuda->d_pxi4 + mprts_cuda->nr_prts, mprts_cuda->nr_prts_send * sizeof(float4), cudaMemcpyDeviceToHost)); } 
//====================================================================== // cuda_mprts_convert_from_cuda void cuda_mprts_convert_from_cuda(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); if (mprts->nr_patches == 0) { return; } float4 *bnd_xi4 = mprts_cuda->h_bnd_xi4; float4 *bnd_pxi4 = mprts_cuda->h_bnd_pxi4; for (int p = 0; p < mprts->nr_patches; p++) { struct psc_particles *prts = psc_mparticles_get_patch(mprts, p); struct psc_particles_cuda *cuda = psc_particles_cuda(prts); cuda->bnd_prts = new particle_single_t[cuda->bnd_n_send]; for (int n = 0; n < cuda->bnd_n_send; n++) { particle_single_t *prt = &cuda->bnd_prts[n]; prt->xi = bnd_xi4[n].x; prt->yi = bnd_xi4[n].y; prt->zi = bnd_xi4[n].z; prt->kind = cuda_float_as_int(bnd_xi4[n].w); prt->pxi = bnd_pxi4[n].x; prt->pyi = bnd_pxi4[n].y; prt->pzi = bnd_pxi4[n].z; prt->qni_wni = bnd_pxi4[n].w; } bnd_xi4 += cuda->bnd_n_send; bnd_pxi4 += cuda->bnd_n_send; } delete[] mprts_cuda->h_bnd_xi4; delete[] mprts_cuda->h_bnd_pxi4; } // ====================================================================== // cuda_mprts_copy_to_dev void cuda_mprts_copy_to_dev(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); float4 *d_xi4 = mprts_cuda->d_xi4; float4 *d_pxi4 = mprts_cuda->d_pxi4; unsigned int nr_recv = 0; for (int p = 0; p < mprts->nr_patches; p++) { struct psc_particles *prts = psc_mparticles_get_patch(mprts, p); struct psc_particles_cuda *cuda = psc_particles_cuda(prts); nr_recv += cuda->bnd_n_recv; } assert(mprts_cuda->nr_prts + nr_recv <= mprts_cuda->nr_alloced); check(cudaMemcpy(d_xi4 + mprts_cuda->nr_prts, mprts_cuda->h_bnd_xi4, nr_recv * sizeof(*d_xi4), cudaMemcpyHostToDevice)); check(cudaMemcpy(d_pxi4 + mprts_cuda->nr_prts, mprts_cuda->h_bnd_pxi4, nr_recv * sizeof(*d_pxi4), cudaMemcpyHostToDevice)); free(mprts_cuda->h_bnd_xi4); free(mprts_cuda->h_bnd_pxi4); mprts_cuda->nr_prts_recv = nr_recv; mprts_cuda->nr_prts += nr_recv; } // ====================================================================== // cuda_mprts_sort void cuda_mprts_sort(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); cuda_mprts_sort_pairs_device(mprts); for (int p = 0; p < mprts->nr_patches; p++) { struct psc_particles *prts = psc_mparticles_get_patch(mprts, p); struct psc_particles_cuda *cuda = psc_particles_cuda(prts); prts->n_part += cuda->bnd_n_recv - cuda->bnd_n_send; cuda->h_dev->n_part = prts->n_part; } mprts_cuda->nr_prts -= mprts_cuda->nr_prts_send; psc_mparticles_cuda_copy_to_dev(mprts); } // ====================================================================== // cuda_mprts_check_ordered_total void cuda_mprts_check_ordered_total(struct psc_mparticles *mprts) { struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts); cuda_mprts_find_block_indices_2_total(mprts); unsigned int last = 0; unsigned int off = 0; for (int p = 0; p < mprts->nr_patches; p++) { struct psc_particles *prts = psc_mparticles_get_patch(mprts, p); struct psc_particles_cuda *cuda = psc_particles_cuda(prts); unsigned int *bidx = new unsigned int[prts->n_part]; cuda_copy_bidx_from_dev(prts, bidx, mprts_cuda->d_bidx + off); for (int n = 0; n < prts->n_part; n++) { if (!(bidx[n] >= last && bidx[n] < mprts->nr_patches * cuda->nr_blocks)) { mprintf("p = %d, n = %d bidx = %d last = %d\n", p, n, bidx[n], last); assert(0); } last = bidx[n]; } delete[] bidx; off += prts->n_part; } }
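// A stripped-down sketch (hypothetical names, illustration only) of the trick used by
// mprts_reorder_and_offsets above: given per-particle block indices d_key[0..n-1] that
// are already sorted ascending with values below n_blocks, fill d_off[0..n_blocks] so
// that the particles of block b occupy the half-open range [d_off[b], d_off[b + 1]).
// One extra thread (i == n) uses n_blocks as a sentinel to close the trailing blocks.
__global__ static void
offsets_from_sorted_keys(int n, int n_blocks, const unsigned int *d_key,
			 unsigned int *d_off)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i > n)
    return;

  int key = (i < n) ? (int) d_key[i] : n_blocks;   // sentinel past the last block
  int prev = (i > 0) ? (int) d_key[i - 1] : -1;
  for (int b = prev + 1; b <= key; b++) {
    d_off[b] = i;                                  // first index whose key is >= b
  }
}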
a08aa219bc202e69c2c89d0fc585ae84471d0440.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include "cs_cuda.h" #include "cs_whm_encode.h" __global__ void d_do_a_pair_32_2_32( int *a, int size, int offset ) { int tid = blockIdx.x*blockDim.x + threadIdx.x; int f, ff ; if ( tid < ( size >> 1 )) { f = ( tid / offset ) * ( offset << 1 ) ; tid = f + tid % offset ; f = a[ tid ] ; ff = a[ tid + offset ] ; a[ tid ] = f + ff ; a[ tid + offset ] = f - ff ; } } __global__ void d_do_a_pair_8_2_32( unsigned char *from, int size, int offset, int *to ) { int tid = blockIdx.x*blockDim.x + threadIdx.x; int f, ff ; if ( tid < ( size >> 1 )) { f = ( tid / offset ) * ( offset << 1 ) ; tid = f + tid % offset ; f = from[ tid ] ; ff = from[ tid + offset ] ; to[ tid ] = f + ff ; to[ tid + offset ] = f - ff ; } } void cs_whm_measurement( char *d_from, int *d_to, int n ) { int first, loop, offset ; int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ; int nBlocks = ( n + ( nThreadsPerBlock - 1 )) / nThreadsPerBlock ; first = 1 ; loop = n ; loop >>= 1 ; offset = 1 ; while ( loop > 0 ) { #ifdef CUDA_OBS fprintf( stderr, "cs_whm_measurement: f %p t %p cnt %d first %d " "loop %d offset %d nblk %d \n", d_from, d_to, n, first, loop, offset, nBlocks ) ; #endif if ( first ) { hipLaunchKernelGGL(( d_do_a_pair_8_2_32) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0, ( unsigned char * )d_from, n, offset, d_to ) ; first = 0 ; } else hipLaunchKernelGGL(( d_do_a_pair_32_2_32) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0, d_to, n, offset ) ; hipDeviceSynchronize() ; offset <<= 1 ; loop >>= 1 ; } }
a08aa219bc202e69c2c89d0fc585ae84471d0440.cu
#include <stdio.h> #include <stdlib.h> #include "cs_cuda.h" #include "cs_whm_encode.h" __global__ void d_do_a_pair_32_2_32( int *a, int size, int offset ) { int tid = blockIdx.x*blockDim.x + threadIdx.x; int f, ff ; if ( tid < ( size >> 1 )) { f = ( tid / offset ) * ( offset << 1 ) ; tid = f + tid % offset ; f = a[ tid ] ; ff = a[ tid + offset ] ; a[ tid ] = f + ff ; a[ tid + offset ] = f - ff ; } } __global__ void d_do_a_pair_8_2_32( unsigned char *from, int size, int offset, int *to ) { int tid = blockIdx.x*blockDim.x + threadIdx.x; int f, ff ; if ( tid < ( size >> 1 )) { f = ( tid / offset ) * ( offset << 1 ) ; tid = f + tid % offset ; f = from[ tid ] ; ff = from[ tid + offset ] ; to[ tid ] = f + ff ; to[ tid + offset ] = f - ff ; } } void cs_whm_measurement( char *d_from, int *d_to, int n ) { int first, loop, offset ; int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ; int nBlocks = ( n + ( nThreadsPerBlock - 1 )) / nThreadsPerBlock ; first = 1 ; loop = n ; loop >>= 1 ; offset = 1 ; while ( loop > 0 ) { #ifdef CUDA_OBS fprintf( stderr, "cs_whm_measurement: f %p t %p cnt %d first %d " "loop %d offset %d nblk %d \n", d_from, d_to, n, first, loop, offset, nBlocks ) ; #endif if ( first ) { d_do_a_pair_8_2_32 <<< nBlocks, nThreadsPerBlock >>> (( unsigned char * )d_from, n, offset, d_to ) ; first = 0 ; } else d_do_a_pair_32_2_32 <<< nBlocks, nThreadsPerBlock >>> ( d_to, n, offset ) ; cudaThreadSynchronize() ; offset <<= 1 ; loop >>= 1 ; } }
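// A host-side reference (illustration only) of the butterfly that d_do_a_pair_8_2_32
// and d_do_a_pair_32_2_32 above apply in log2(n) passes, handy for checking the device
// output on small inputs.  n is assumed to be a power of two, as the kernels require.
static void
cs_whm_reference( unsigned char *from, int *to, int n )
{
	int i, offset, base, j, f, ff ;

	for ( i = 0 ; i < n ; i++ )
		to[ i ] = from[ i ] ;

	for ( offset = 1 ; offset < n ; offset <<= 1 )
	{
		for ( base = 0 ; base < n ; base += ( offset << 1 ))
		{
			for ( j = base ; j < base + offset ; j++ )
			{
				f = to[ j ] ;
				ff = to[ j + offset ] ;
				to[ j ] = f + ff ;
				to[ j + offset ] = f - ff ;
			}
		}
	}
}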
62b5aa1616f5fad09182989fe84d07930d855f08.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "..\..\cuda_by_example\common\book.h" int main(void) { hipDeviceProp_t dev; int count; HANDLE_ERROR(hipGetDeviceCount(&count)); for (int i = 0; i< count; i++) { HANDLE_ERROR(hipGetDeviceProperties(&dev, i)); printf(" --- General Info about Device %d ---\n", i); printf("Name: %s\n",dev.name); printf("Compute capability: %d.%d\n",dev.major,dev.minor); printf("Clock rate: %d\n", dev.clockRate); printf("Device copy overlap: "); if (dev.deviceOverlap) printf("Enabled\n"); else printf("Disabled\n"); printf("Kernel execution timeout : "); if (dev.kernelExecTimeoutEnabled) printf("Enabled\n"); else printf("Disabled\n"); printf(" --- Memory Info for device %d ---\n", i); printf("Total global mem: %ld\n", dev.totalGlobalMem); printf("Total constant Mem: %ld\n", dev.totalConstMem); printf("Max mem pitch: %ld\n", dev.memPitch); printf("Texture Alignment: %ld\n", dev.textureAlignment); printf(" --- MP Information for device %d ---\n", i); printf("Multiprocessor count: %d\n",dev.multiProcessorCount); printf("Shared mem per mp: %ld\n", dev.sharedMemPerBlock); printf("Registers per mp: %d\n", dev.regsPerBlock); printf("Threads in warp: %d\n", dev.warpSize); printf("Max threads per block: %d\n",dev.maxThreadsPerBlock); printf("Max thread dimensions: (%d, %d, %d)\n",dev.maxThreadsDim[0], dev.maxThreadsDim[1],dev.maxThreadsDim[2]); printf("Max grid dimensions: (%d, %d, %d)\n",dev.maxGridSize[0], dev.maxGridSize[1],dev.maxGridSize[2]); printf("\n"); } char done = 'n'; printf("done?"); scanf("%c", &done); }
62b5aa1616f5fad09182989fe84d07930d855f08.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "..\..\cuda_by_example\common\book.h" int main(void) { cudaDeviceProp dev; int count; HANDLE_ERROR(cudaGetDeviceCount(&count)); for (int i = 0; i< count; i++) { HANDLE_ERROR(cudaGetDeviceProperties(&dev, i)); printf(" --- General Info about Device %d ---\n", i); printf("Name: %s\n",dev.name); printf("Compute capability: %d.%d\n",dev.major,dev.minor); printf("Clock rate: %d\n", dev.clockRate); printf("Device copy overlap: "); if (dev.deviceOverlap) printf("Enabled\n"); else printf("Disabled\n"); printf("Kernel execution timeout : "); if (dev.kernelExecTimeoutEnabled) printf("Enabled\n"); else printf("Disabled\n"); printf(" --- Memory Info for device %d ---\n", i); printf("Total global mem: %ld\n", dev.totalGlobalMem); printf("Total constant Mem: %ld\n", dev.totalConstMem); printf("Max mem pitch: %ld\n", dev.memPitch); printf("Texture Alignment: %ld\n", dev.textureAlignment); printf(" --- MP Information for device %d ---\n", i); printf("Multiprocessor count: %d\n",dev.multiProcessorCount); printf("Shared mem per mp: %ld\n", dev.sharedMemPerBlock); printf("Registers per mp: %d\n", dev.regsPerBlock); printf("Threads in warp: %d\n", dev.warpSize); printf("Max threads per block: %d\n",dev.maxThreadsPerBlock); printf("Max thread dimensions: (%d, %d, %d)\n",dev.maxThreadsDim[0], dev.maxThreadsDim[1],dev.maxThreadsDim[2]); printf("Max grid dimensions: (%d, %d, %d)\n",dev.maxGridSize[0], dev.maxGridSize[1],dev.maxGridSize[2]); printf("\n"); } char done = 'n'; printf("done?"); scanf("%c", done); }
7df020531d78fcf52e854fecb8e3c7affff3abea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file dnn/src/cuda/topk/topk_radix.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "./topk_radix.cuh" #include "src/cuda/hipcub/hipcub.hpp" #include "src/cuda/cuda_shfl_compat.cuh" #include "src/cuda/utils.cuh" #include <algorithm> #include <cmath> #if __CUDACC_VER_MAJOR__ < 9 #pragma message "topk is a little slower on cuda earlier than 9.0" // on cuda 9.0 and later, due to thread-divergent branches we should use // __syncwarp; and I am too lazy to implement a correct legacy version, so just // use __syncthreads instead for older cuda #define __syncwarp __syncthreads #endif using namespace megdnn; using namespace cuda; using namespace topk; using namespace internal; namespace cuda_topk_impl { const uint32_t WARP_SIZE = 32; static __device__ __forceinline__ uint32_t u32_from_64_low(uint64_t x) { return x; } static __device__ __forceinline__ uint32_t u32_from_64_high(uint64_t x) { return x >> 32; } template <uint32_t x> struct static_log2 { static const uint32_t val = static_log2<x / 2>::val + 1; }; template <> struct static_log2<1> { static const uint32_t val = 0; }; template <uint32_t SIZE, typename T = uint32_t> struct DeviceScanPackedItem; template <typename T> struct DeviceScanPackedItem<1, T> { __device__ __forceinline__ T load(T* data, uint32_t tid) { return data[tid]; } __device__ __forceinline__ void store(T* data, uint32_t tid, uint32_t s) { data[tid] = s; } }; template <> struct DeviceScanPackedItem<4, uint8_t> { uint8_t d0, d1, d2, d3; __device__ __forceinline__ uint32_t load(uint8_t* data, uint32_t tid) { uint32_t item = reinterpret_cast<uint32_t*>(data)[tid]; d3 = item >> 24; d2 = (item >> 16) & 0xFF; d1 = (item >> 8) & 0xFF; d0 = item & 0xFF; return d0 + d1 + d2 + d3; } __device__ __forceinline__ void store(uint8_t* data, uint32_t tid, uint32_t s) { uint8_t o3 = s, o2 = o3 - d3, o1 = o2 - d2, o0 = o1 - d1; reinterpret_cast<uint32_t*>(data)[tid] = (o3 << 24) | (o2 << 16) | (o1 << 8) | o0; } }; //! inclusive scan within a warp using register shuffle template <uint32_t SIZE> __device__ __forceinline__ uint32_t device_scan_shfl_core(uint32_t s, uint32_t tid) { static const uint32_t SIZE_LOG2 = static_log2<SIZE>::val; uint32_t self_lane = tid % SIZE; #pragma unroll for (uint32_t step_log2 = 1; step_log2 <= SIZE_LOG2; ++step_log2) { uint32_t from_lane = (self_lane & ~((1u << step_log2) - 1)) + ((1 << (step_log2 - 1)) - 1); uint32_t valid_mask = (from_lane >= self_lane) - 1; uint32_t s_below = __shfl_up(s, self_lane - from_lane, SIZE); s += s_below & valid_mask; } return s; } /*! 
* \brief compute inplace inclusive prefix sum of \p data * * Note: no synchronization at the end */ template <uint32_t SIZE, uint32_t NR_SHARD> __device__ __forceinline__ void device_scan(uint32_t* data, uint32_t tid, uint32_t shard) { const uint32_t NR_WARP = SIZE / NR_SHARD / WARP_SIZE; #if __cplusplus > 199711L static_assert(NR_WARP <= WARP_SIZE || (NR_WARP & (NR_WARP - 1)), "bad params"); #endif __syncthreads(); DeviceScanPackedItem<NR_SHARD> packed_item; uint32_t s = packed_item.load(data, tid); s = device_scan_shfl_core<WARP_SIZE>(s, tid); // sync between warps __shared__ uint32_t warp_sums_storage[NR_SHARD][NR_WARP]; uint32_t warp_id = tid / WARP_SIZE; uint32_t* warp_sums = warp_sums_storage[shard]; if ((tid & (WARP_SIZE - 1)) == WARP_SIZE - 1) { warp_sums[warp_id] = s; } __syncthreads(); for (uint32_t i = 0; i < warp_id; ++i) { s += warp_sums[i]; } packed_item.store(data, tid, s); } template <uint32_t PACK_SIZE, typename T> __device__ __forceinline__ void device_scan_packed_accu32(T* data, uint32_t tid) { DeviceScanPackedItem<PACK_SIZE, T> scan_pack; __syncwarp(); uint32_t sum = scan_pack.load(data, tid); sum = device_scan_shfl_core<WARP_SIZE>(sum, tid); scan_pack.store(data, tid, sum); __syncwarp(); } namespace kth { const uint32_t BUCKET_BITS = 8, NR_BUCKET = 1 << BUCKET_BITS, LOCAL_CNT_SHARD = 16, BLOCK_DIM = NR_BUCKET * 4; template <uint32_t v> struct enforce_const_u32 { static const uint32_t val = v; }; /*! * \brief compute scattered histogram for the whole input * * launch config: grid(X, batch), thread(BLOCK_DIM) * * Keys not starting with given prefix would be treated as max * * \param[in] input [batch, length] * \param[out] buckets [batch, X, NR_BUCKET] */ template <typename ctype, bool prefix_valid, uint32_t shift> static __global__ void compute_histogram(const ctype* input, uint32_t* bucket_cnt, uint32_t length, int32_t lda, uint32_t* prefix_ptr) { // note that this layout eliminates bank conflict __shared__ uint32_t local_cnt[NR_BUCKET][LOCAL_CNT_SHARD]; int32_t batch = blockIdx.y; input += batch * lda; bucket_cnt += (batch * gridDim.x + blockIdx.x) * NR_BUCKET; uint32_t prefix; if (prefix_valid) { prefix = prefix_ptr[batch]; } { // init local_cnt uint32_t* p = &local_cnt[0][0]; for (uint32_t i = threadIdx.x; i < LOCAL_CNT_SHARD * NR_BUCKET; i += BLOCK_DIM) { p[i] = 0; } __syncthreads(); } { // accumulate uint32_t i = blockIdx.x * BLOCK_DIM + threadIdx.x, stride = BLOCK_DIM * gridDim.x; uint32_t* dst = &local_cnt[0][threadIdx.x % LOCAL_CNT_SHARD]; while (i < length) { uint32_t key = RadixConverter<ctype>::to_radix(input[i]); if (prefix_valid) { const uint32_t mask = ((~0u) << ((prefix_valid ? shift : 0) + BUCKET_BITS)); key |= ((key & enforce_const_u32<mask>::val) == prefix) - 1; } uint32_t idx = (key >> shift) & ((1 << BUCKET_BITS) - 1); atomicAdd(dst + idx * LOCAL_CNT_SHARD, 1); i += stride; } } __syncthreads(); if (threadIdx.x < NR_BUCKET) { uint32_t s = 0; #pragma unroll for (int i = 0; i < LOCAL_CNT_SHARD; ++i) { s += local_cnt[threadIdx.x][(i + threadIdx.x) % LOCAL_CNT_SHARD]; } bucket_cnt[threadIdx.x] = s; } } /*! 
* \brief update the values in \p prefix to k'th value in according to bucket * count, and update \p k * * launch config: grid(batch), thread(NR_BUCKET) */ template <bool first, bool last, uint32_t shift, typename ctype> static __global__ void update_prefix_and_k(const uint32_t* bucket_cnt, uint32_t* prefix, uint32_t* k, uint32_t k_init, uint32_t bucket_sharding_size, ctype* result) { __shared__ uint32_t cumsum_bucket_cnt[NR_BUCKET + 1]; uint32_t batch = blockIdx.x; bucket_cnt += batch * bucket_sharding_size * NR_BUCKET; uint32_t sum = 0; for (uint32_t i = 0; i < bucket_sharding_size; ++i) { sum += bucket_cnt[i * NR_BUCKET + threadIdx.x]; } if (!threadIdx.x) { cumsum_bucket_cnt[0] = 0; } const uint32_t i = threadIdx.x + 1; cumsum_bucket_cnt[i] = sum; device_scan<NR_BUCKET, 1>(cumsum_bucket_cnt + 1, threadIdx.x, 0); __syncthreads(); uint32_t kv = first ? k_init : k[batch]; if ((cumsum_bucket_cnt[i] >= kv) & (cumsum_bucket_cnt[i - 1] < kv)) { uint32_t b = (i - 1) << shift; if (first) { prefix[batch] = b; } else if (last) { result[batch] = RadixConverter<ctype>::from_radix(prefix[batch] | b); } else { prefix[batch] |= b; } if (!last) { k[batch] = kv - cumsum_bucket_cnt[i - 1]; } } if ((cumsum_bucket_cnt[NR_BUCKET] < kv) | (cumsum_bucket_cnt[i] != cumsum_bucket_cnt[i - 1] + sum)) { // impossible int* bad = 0x0; *bad = 23; } } static uint32_t get_grid_dim_x(uint32_t length) { return std::max<uint32_t>(length / (128 * BLOCK_DIM), 1); } } // namespace kth /*! * \brief select values smaller or larger than given threshold * * Note: we use register shuffle extensively to perform both reduce and scan. */ namespace select { struct LessPred { template <typename ctype> __device__ __forceinline__ static bool cmp(ctype x, ctype y) { return x < y; } }; struct GreaterPred { template <typename ctype> __device__ __forceinline__ static bool cmp(ctype x, ctype y) { return x > y; } }; const uint32_t REDUCE_WARP_SIZE = 16, REDUCE_SIZE = WARP_SIZE * 4, REDUCE_SHARD = 64; /*! 
* \brief reduce number of elements satisfying Pred in (N, M) mat to * (N, ceil(M / REDUCE_SIZE)) * * launch config: grid(X, batch), * thread(REDUCE_WARP_SIZE, REDUCE_SHARD) * * Each block computes REDUCE_SHARD outputs */ template <typename ctype, class Pred> static __global__ void kern_reduce_block_cnt(const ctype* input_data, const ctype* input_thresh, uint32_t length, int32_t lda, uint64_t* output, uint32_t output_width) { static const uint32_t BLOCK_DIM_X = REDUCE_WARP_SIZE, BLOCK_DIM_Y = REDUCE_SHARD; uint32_t batch = blockIdx.y, out_col = blockIdx.x * BLOCK_DIM_Y + threadIdx.y, col_begin = out_col * REDUCE_SIZE, col_end = min(col_begin + REDUCE_SIZE, length), tid_local = threadIdx.x; if (out_col >= output_width) { return; } uint32_t thresh = RadixConverter<ctype>::to_radix(input_thresh[batch]); input_data += static_cast<int32_t>(batch) * lda; uint32_t sum_eq = 0, sum_lt = 0; for (uint32_t i = col_begin + tid_local; i < col_end; i += BLOCK_DIM_X) { uint32_t iv = RadixConverter<ctype>::to_radix(input_data[i]); sum_eq += iv == thresh; sum_lt += Pred::cmp(iv, thresh); } #pragma unroll for (uint32_t step = REDUCE_WARP_SIZE / 2; step >= 1; step >>= 1) { sum_eq += __shfl_down(sum_eq, step, REDUCE_WARP_SIZE); sum_lt += __shfl_down(sum_lt, step, REDUCE_WARP_SIZE); } // reduce warp results to a single scalar if (!tid_local) { output[batch * output_width + out_col] = (static_cast<uint64_t>(sum_eq) << 32) | sum_lt; } } static MEGDNN_NOINLINE hipError_t invoke_cub_scan(const uint64_t* input, uint64_t* output, void* workspace, size_t& workspace_size, uint32_t size, hipStream_t stream) { return hipcub::DeviceScan::InclusiveSum(workspace, workspace_size, input, output, size, stream); } static __global__ void kern_init_zero(uint64_t* dst) { dst[0] = 0; } /*! * \brief copy top-k values of each row from input to output * * launch config: grid(X, batch), * thread(WARP_SIZE, COPY_SHARD) */ template <typename ctype, class Pred, int COPY_SHARD> static __global__ void kern_copy(const ctype* input_data, const ctype* input_thresh, const uint64_t* scan, uint32_t scan_width, ctype* output_value, int32_t* output_idx, uint32_t length, uint32_t k, int32_t lda) { #if __cplusplus > 199711L static_assert(REDUCE_SIZE < 256, "local_sum_storage can not be uint8_t"); #endif static const uint32_t BLOCK_DIM_X = WARP_SIZE, BLOCK_DIM_Y = COPY_SHARD; uint32_t scan_col = blockIdx.x * BLOCK_DIM_Y + threadIdx.y; if (scan_col >= scan_width) { return; } uint32_t batch = blockIdx.y, inp_col_begin = min(scan_col * REDUCE_SIZE, length), inp_col_length = min(inp_col_begin + REDUCE_SIZE, length) - inp_col_begin, tid_local = threadIdx.x; uint32_t thresh = RadixConverter<ctype>::to_radix(input_thresh[batch]); input_data += static_cast<int32_t>(batch) * lda + static_cast<int>(inp_col_begin); __shared__ uint8_t local_sum_storage[BLOCK_DIM_Y][2][REDUCE_SIZE + 4]; uint8_t *local_sum_eq = local_sum_storage[threadIdx.y][0], *local_sum_lt = local_sum_storage[threadIdx.y][1]; if (!tid_local) { local_sum_eq[3] = 0; local_sum_lt[3] = 0; } local_sum_eq += 4; local_sum_lt += 4; const uint32_t WORKLOAD = REDUCE_SIZE / WARP_SIZE; #pragma unroll for (uint32_t j = 0; j < WORKLOAD; ++j) { uint32_t i = j * BLOCK_DIM_X + tid_local; if (i < inp_col_length) { uint32_t iv = RadixConverter<ctype>::to_radix(input_data[i]); local_sum_eq[i] = iv == thresh; local_sum_lt[i] = Pred::cmp(iv, thresh); } else { local_sum_eq[i] = 0; local_sum_lt[i] = 0; } } device_scan_packed_accu32<WORKLOAD, uint8_t>(local_sum_eq, tid_local); device_scan_packed_accu32<WORKLOAD, 
uint8_t>(local_sum_lt, tid_local); scan += batch * scan_width; uint64_t scan_prev_pack = scan[static_cast<int>(scan_col) - 1], k_offset_pack = scan_prev_pack - scan[-1], scan_self_pack = scan[scan_col] - scan_prev_pack; #define unpack(name) \ uint32_t name##_eq = u32_from_64_high(name##_pack), \ name##_lt = u32_from_64_low(name##_pack) unpack(k_offset); unpack(scan_self); #undef unpack uint32_t allowed_eq = k - min(k, (u32_from_64_low(scan[scan_width - 1]) - u32_from_64_low(scan[-1]))), ls_lt_max = k - min(k_offset_lt, k), ls_eq_max = allowed_eq - min(allowed_eq, k_offset_eq); if ((scan_self_lt && ls_lt_max) || (scan_self_eq && ls_eq_max)) { #pragma unroll for (uint32_t j = 0; j < WORKLOAD; ++j) { int32_t i = j * BLOCK_DIM_X + tid_local; uint32_t cur_lt = local_sum_lt[i], cur_eq = local_sum_eq[i]; bool is_lt = cur_lt <= ls_lt_max && cur_lt != local_sum_lt[i - 1]; bool is_eq = cur_eq <= ls_eq_max && cur_eq != local_sum_eq[i - 1]; // exactly one should be true if (is_lt || is_eq) { uint32_t off_lt = cur_lt + k_offset_lt - 1; uint32_t off_eq = cur_eq + k_offset_eq - 1 + (k - allowed_eq); uint32_t ocol = is_lt ? off_lt : off_eq; output_value[batch * k + ocol] = input_data[i]; output_idx[batch * k + ocol] = i + inp_col_begin; } } } } //! get workspace for scan, aligned to uint64_t static size_t get_scan_workspace(uint32_t size) { size_t wk = 0; hipError_t err = invoke_cub_scan(NULL, NULL, NULL, wk, size, NULL); if (err != hipSuccess) { fprintf(stderr, "topk: cub scan failed: %s (%d)\n", hipGetErrorString(err), static_cast<int>(err)); megdnn_trap(); } return ((wk - 1) / sizeof(uint64_t) + 1) * sizeof(uint64_t); } } // namespace select } // namespace cuda_topk_impl uint32_t topk::find_kth_radix_workspace(uint32_t batch, uint32_t length, uint32_t grid_dim_y_limit) { using namespace cuda_topk_impl::kth; uint32_t limit = batch > grid_dim_y_limit ? 
grid_dim_y_limit : batch; return (limit * get_grid_dim_x(length) * NR_BUCKET + limit * 2) * sizeof(uint32_t); } template <typename ctype> hipError_t topk::find_kth_radix(const ctype* input, ctype* output, void* workspace, uint32_t batch, uint32_t length, int32_t lda, int32_t k, uint32_t grid_dim_y_limit, hipStream_t stream) { using namespace cuda_topk_impl::kth; if (!k) { return hipErrorUnknown; } if (k < 0) { k = length + k + 1; } if (!(BUCKET_BITS == 8 && (sizeof(ctype) == 4 || sizeof(ctype) == 2))) { // no c++11 in megdnn cuda; so we just trap instead of using static // assert megdnn_trap(); } uint32_t batch_idx = 0; uint32_t grid_dim_x = get_grid_dim_x(length); uint32_t grid_dim_y = 1; while (batch_idx < batch) { if (batch - batch_idx >= grid_dim_y_limit) { grid_dim_y = grid_dim_y_limit; } else { grid_dim_y = batch - batch_idx; } dim3 grid_dim(grid_dim_x, grid_dim_y); uint32_t* dev_k = static_cast<uint32_t*>(workspace); uint32_t* dev_prefix = dev_k + grid_dim_y; uint32_t* bucket_cnt = dev_prefix + grid_dim_y; hipLaunchKernelGGL(( compute_histogram<ctype, false, 24>), dim3(grid_dim), dim3(BLOCK_DIM), 0, stream, input + batch_idx * lda, bucket_cnt, length, lda, nullptr); // use float to make compiler happy; it is not used since last == false hipLaunchKernelGGL(( update_prefix_and_k<true, false, 24, float>) , dim3(grid_dim_y), dim3(NR_BUCKET), 0, stream, bucket_cnt, dev_prefix, dev_k, k, grid_dim_x, nullptr); hipLaunchKernelGGL(( compute_histogram<ctype, true, 16>), dim3(grid_dim), dim3(BLOCK_DIM), 0, stream, input + batch_idx * lda, bucket_cnt, length, lda, dev_prefix); hipLaunchKernelGGL(( update_prefix_and_k<false, false, 16, float>) , dim3(grid_dim_y), dim3(NR_BUCKET), 0, stream, bucket_cnt, dev_prefix, dev_k, k, grid_dim_x, nullptr); hipLaunchKernelGGL(( compute_histogram<ctype, true, 8>), dim3(grid_dim), dim3(BLOCK_DIM), 0, stream, input + batch_idx * lda, bucket_cnt, length, lda, dev_prefix); hipLaunchKernelGGL(( update_prefix_and_k<false, false, 8, float>) , dim3(grid_dim_y), dim3(NR_BUCKET), 0, stream, bucket_cnt, dev_prefix, dev_k, k, grid_dim_x, nullptr); hipLaunchKernelGGL(( compute_histogram<ctype, true, 0>), dim3(grid_dim), dim3(BLOCK_DIM), 0, stream, input + batch_idx * lda, bucket_cnt, length, lda, dev_prefix); hipLaunchKernelGGL(( update_prefix_and_k<false, true, 0, ctype>) , dim3(grid_dim_y), dim3(NR_BUCKET), 0, stream, bucket_cnt, dev_prefix, dev_k, k, grid_dim_x, output + batch_idx); batch_idx += grid_dim_y; } return hipGetLastError(); } template <typename ctype> hipError_t topk::topk_select(const ctype* input, const ctype* thresh, ctype* output_value, int32_t* output_idx, void* workspace, uint32_t batch, uint32_t length, int32_t lda, int32_t k, uint32_t batch_upper_limit, hipStream_t stream) { using namespace cuda_topk_impl; using namespace cuda_topk_impl::select; uint32_t length_split = DIVUP(length, REDUCE_SIZE); void (*kptr_reduce_block_cnt)(const ctype*, const ctype*, uint32_t, int32_t, uint64_t*, uint32_t); void (*kptr_copy)(const ctype*, const ctype*, const uint64_t*, uint32_t, ctype*, int32_t*, uint32_t, uint32_t, int32_t); int kern_copy_shard; { int grid, block; hipError_t err = hipOccupancyMaxPotentialBlockSize( &grid, &block, kern_copy<ctype, GreaterPred, 32>); if (err) { return err; } kern_copy_shard = block / (WARP_SIZE * 8) * 8; if (!kern_copy_shard) { fprintf(stderr, "topk: failed to launch: block=%d\n", block); return hipErrorLaunchOutOfResources; } } #define CASE_SHARD_ON(pred, n) \ case n: \ kptr_copy = kern_copy<ctype, pred, n>; \ break #define 
CASE_SHARD(pred) \ switch (kern_copy_shard) { \ CASE_SHARD_ON(pred, 8); \ CASE_SHARD_ON(pred, 16); \ CASE_SHARD_ON(pred, 24); \ CASE_SHARD_ON(pred, 32); \ default: \ fprintf(stderr, "topk: failed to launch: shard=%d\n", \ kern_copy_shard); \ return hipErrorLaunchOutOfResources; \ } if (k < 0) { k = -k; kptr_reduce_block_cnt = kern_reduce_block_cnt<ctype, GreaterPred>; CASE_SHARD(GreaterPred); } else { kptr_reduce_block_cnt = kern_reduce_block_cnt<ctype, LessPred>; CASE_SHARD(LessPred); } #undef CASE_SHARD #undef CASE_SHARD_ON uint32_t batch_idx = 0; uint32_t batch_real = 1; while (batch_idx < batch) { if (batch - batch_idx >= batch_upper_limit) { batch_real = batch_upper_limit; } else { batch_real = batch - batch_idx; } size_t scan_size = batch_real * length_split; size_t scan_wk = get_scan_workspace(scan_size); uint64_t *scan_inp = static_cast<uint64_t*>(workspace) + scan_wk / sizeof(uint64_t), *scan_out = scan_inp + scan_size; // reduce to scan_inp hipLaunchKernelGGL(( kptr_reduce_block_cnt), dim3(dim3(DIVUP(length_split, REDUCE_SHARD), batch_real)), dim3(dim3(REDUCE_WARP_SIZE, REDUCE_SHARD)), 0, stream, input + batch_idx * lda, thresh + batch_idx, length, lda, scan_inp, length_split); // scan to scan_out scan_out += 1; // set scan[-1] to 0 hipError_t err = invoke_cub_scan(scan_inp, scan_out, workspace, scan_wk, scan_size, stream); if (err != hipSuccess) { return err; } hipLaunchKernelGGL(( kern_init_zero), dim3(1), dim3(1), 0, stream, scan_out - 1); // copy result hipLaunchKernelGGL(( kptr_copy), dim3(dim3(DIVUP(length_split, kern_copy_shard), batch_real)), dim3(dim3(WARP_SIZE, kern_copy_shard)), 0, stream, input + batch_idx * lda, thresh + batch_idx, scan_out, length_split, output_value + std::abs(k) * batch_idx, output_idx + std::abs(k) * batch_idx, length, k, lda); batch_idx += batch_real; } return hipGetLastError(); } uint32_t topk::topk_select_workspace(uint32_t batch, uint32_t length) { using namespace cuda_topk_impl::select; size_t scan_size = batch * DIVUP(length, REDUCE_SIZE); return get_scan_workspace(scan_size) + sizeof(uint64_t) * (scan_size * 2 + 1); } namespace megdnn { namespace cuda { namespace topk { #define INST(t) \ template hipError_t find_kth_radix<t>(const t*, t*, void*, uint32_t, \ uint32_t, int32_t, int32_t, \ uint32_t, hipStream_t); \ template hipError_t topk_select<t>(const t*, const t*, t*, int32_t*, \ void*, uint32_t, uint32_t, int32_t, \ int32_t, uint32_t, hipStream_t) INST(float); INST(int32_t); DNN_INC_FLOAT16(INST(dt_float16)); #undef INST } // namespace topk } // namespace cuda } // namespace megdnn // vim: ft=cuda syntax=cuda.doxygen
7df020531d78fcf52e854fecb8e3c7affff3abea.cu
/** * \file dnn/src/cuda/topk/topk_radix.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "./topk_radix.cuh" #include "src/cuda/cub/device/device_scan.cuh" #include "src/cuda/cuda_shfl_compat.cuh" #include "src/cuda/utils.cuh" #include <algorithm> #include <cmath> #if __CUDACC_VER_MAJOR__ < 9 #pragma message "topk is a little slower on cuda earlier than 9.0" // on cuda 9.0 and later, due to thread-divergent branches we should use // __syncwarp; and I am too lazy to implement a correct legacy version, so just // use __syncthreads instead for older cuda #define __syncwarp __syncthreads #endif using namespace megdnn; using namespace cuda; using namespace topk; using namespace internal; namespace cuda_topk_impl { const uint32_t WARP_SIZE = 32; static __device__ __forceinline__ uint32_t u32_from_64_low(uint64_t x) { return x; } static __device__ __forceinline__ uint32_t u32_from_64_high(uint64_t x) { return x >> 32; } template <uint32_t x> struct static_log2 { static const uint32_t val = static_log2<x / 2>::val + 1; }; template <> struct static_log2<1> { static const uint32_t val = 0; }; template <uint32_t SIZE, typename T = uint32_t> struct DeviceScanPackedItem; template <typename T> struct DeviceScanPackedItem<1, T> { __device__ __forceinline__ T load(T* data, uint32_t tid) { return data[tid]; } __device__ __forceinline__ void store(T* data, uint32_t tid, uint32_t s) { data[tid] = s; } }; template <> struct DeviceScanPackedItem<4, uint8_t> { uint8_t d0, d1, d2, d3; __device__ __forceinline__ uint32_t load(uint8_t* data, uint32_t tid) { uint32_t item = reinterpret_cast<uint32_t*>(data)[tid]; d3 = item >> 24; d2 = (item >> 16) & 0xFF; d1 = (item >> 8) & 0xFF; d0 = item & 0xFF; return d0 + d1 + d2 + d3; } __device__ __forceinline__ void store(uint8_t* data, uint32_t tid, uint32_t s) { uint8_t o3 = s, o2 = o3 - d3, o1 = o2 - d2, o0 = o1 - d1; reinterpret_cast<uint32_t*>(data)[tid] = (o3 << 24) | (o2 << 16) | (o1 << 8) | o0; } }; //! inclusive scan within a warp using register shuffle template <uint32_t SIZE> __device__ __forceinline__ uint32_t device_scan_shfl_core(uint32_t s, uint32_t tid) { static const uint32_t SIZE_LOG2 = static_log2<SIZE>::val; uint32_t self_lane = tid % SIZE; #pragma unroll for (uint32_t step_log2 = 1; step_log2 <= SIZE_LOG2; ++step_log2) { uint32_t from_lane = (self_lane & ~((1u << step_log2) - 1)) + ((1 << (step_log2 - 1)) - 1); uint32_t valid_mask = (from_lane >= self_lane) - 1; uint32_t s_below = __shfl_up(s, self_lane - from_lane, SIZE); s += s_below & valid_mask; } return s; } /*! 
* \brief compute inplace inclusive prefix sum of \p data * * Note: no synchronization at the end */ template <uint32_t SIZE, uint32_t NR_SHARD> __device__ __forceinline__ void device_scan(uint32_t* data, uint32_t tid, uint32_t shard) { const uint32_t NR_WARP = SIZE / NR_SHARD / WARP_SIZE; #if __cplusplus > 199711L static_assert(NR_WARP <= WARP_SIZE || (NR_WARP & (NR_WARP - 1)), "bad params"); #endif __syncthreads(); DeviceScanPackedItem<NR_SHARD> packed_item; uint32_t s = packed_item.load(data, tid); s = device_scan_shfl_core<WARP_SIZE>(s, tid); // sync between warps __shared__ uint32_t warp_sums_storage[NR_SHARD][NR_WARP]; uint32_t warp_id = tid / WARP_SIZE; uint32_t* warp_sums = warp_sums_storage[shard]; if ((tid & (WARP_SIZE - 1)) == WARP_SIZE - 1) { warp_sums[warp_id] = s; } __syncthreads(); for (uint32_t i = 0; i < warp_id; ++i) { s += warp_sums[i]; } packed_item.store(data, tid, s); } template <uint32_t PACK_SIZE, typename T> __device__ __forceinline__ void device_scan_packed_accu32(T* data, uint32_t tid) { DeviceScanPackedItem<PACK_SIZE, T> scan_pack; __syncwarp(); uint32_t sum = scan_pack.load(data, tid); sum = device_scan_shfl_core<WARP_SIZE>(sum, tid); scan_pack.store(data, tid, sum); __syncwarp(); } namespace kth { const uint32_t BUCKET_BITS = 8, NR_BUCKET = 1 << BUCKET_BITS, LOCAL_CNT_SHARD = 16, BLOCK_DIM = NR_BUCKET * 4; template <uint32_t v> struct enforce_const_u32 { static const uint32_t val = v; }; /*! * \brief compute scattered histogram for the whole input * * launch config: grid(X, batch), thread(BLOCK_DIM) * * Keys not starting with given prefix would be treated as max * * \param[in] input [batch, length] * \param[out] buckets [batch, X, NR_BUCKET] */ template <typename ctype, bool prefix_valid, uint32_t shift> static __global__ void compute_histogram(const ctype* input, uint32_t* bucket_cnt, uint32_t length, int32_t lda, uint32_t* prefix_ptr) { // note that this layout eliminates bank conflict __shared__ uint32_t local_cnt[NR_BUCKET][LOCAL_CNT_SHARD]; int32_t batch = blockIdx.y; input += batch * lda; bucket_cnt += (batch * gridDim.x + blockIdx.x) * NR_BUCKET; uint32_t prefix; if (prefix_valid) { prefix = prefix_ptr[batch]; } { // init local_cnt uint32_t* p = &local_cnt[0][0]; for (uint32_t i = threadIdx.x; i < LOCAL_CNT_SHARD * NR_BUCKET; i += BLOCK_DIM) { p[i] = 0; } __syncthreads(); } { // accumulate uint32_t i = blockIdx.x * BLOCK_DIM + threadIdx.x, stride = BLOCK_DIM * gridDim.x; uint32_t* dst = &local_cnt[0][threadIdx.x % LOCAL_CNT_SHARD]; while (i < length) { uint32_t key = RadixConverter<ctype>::to_radix(input[i]); if (prefix_valid) { const uint32_t mask = ((~0u) << ((prefix_valid ? shift : 0) + BUCKET_BITS)); key |= ((key & enforce_const_u32<mask>::val) == prefix) - 1; } uint32_t idx = (key >> shift) & ((1 << BUCKET_BITS) - 1); atomicAdd(dst + idx * LOCAL_CNT_SHARD, 1); i += stride; } } __syncthreads(); if (threadIdx.x < NR_BUCKET) { uint32_t s = 0; #pragma unroll for (int i = 0; i < LOCAL_CNT_SHARD; ++i) { s += local_cnt[threadIdx.x][(i + threadIdx.x) % LOCAL_CNT_SHARD]; } bucket_cnt[threadIdx.x] = s; } } /*! 
* \brief update the values in \p prefix to k'th value in according to bucket * count, and update \p k * * launch config: grid(batch), thread(NR_BUCKET) */ template <bool first, bool last, uint32_t shift, typename ctype> static __global__ void update_prefix_and_k(const uint32_t* bucket_cnt, uint32_t* prefix, uint32_t* k, uint32_t k_init, uint32_t bucket_sharding_size, ctype* result) { __shared__ uint32_t cumsum_bucket_cnt[NR_BUCKET + 1]; uint32_t batch = blockIdx.x; bucket_cnt += batch * bucket_sharding_size * NR_BUCKET; uint32_t sum = 0; for (uint32_t i = 0; i < bucket_sharding_size; ++i) { sum += bucket_cnt[i * NR_BUCKET + threadIdx.x]; } if (!threadIdx.x) { cumsum_bucket_cnt[0] = 0; } const uint32_t i = threadIdx.x + 1; cumsum_bucket_cnt[i] = sum; device_scan<NR_BUCKET, 1>(cumsum_bucket_cnt + 1, threadIdx.x, 0); __syncthreads(); uint32_t kv = first ? k_init : k[batch]; if ((cumsum_bucket_cnt[i] >= kv) & (cumsum_bucket_cnt[i - 1] < kv)) { uint32_t b = (i - 1) << shift; if (first) { prefix[batch] = b; } else if (last) { result[batch] = RadixConverter<ctype>::from_radix(prefix[batch] | b); } else { prefix[batch] |= b; } if (!last) { k[batch] = kv - cumsum_bucket_cnt[i - 1]; } } if ((cumsum_bucket_cnt[NR_BUCKET] < kv) | (cumsum_bucket_cnt[i] != cumsum_bucket_cnt[i - 1] + sum)) { // impossible int* bad = 0x0; *bad = 23; } } static uint32_t get_grid_dim_x(uint32_t length) { return std::max<uint32_t>(length / (128 * BLOCK_DIM), 1); } } // namespace kth /*! * \brief select values smaller or larger than given threshold * * Note: we use register shuffle extensively to perform both reduce and scan. */ namespace select { struct LessPred { template <typename ctype> __device__ __forceinline__ static bool cmp(ctype x, ctype y) { return x < y; } }; struct GreaterPred { template <typename ctype> __device__ __forceinline__ static bool cmp(ctype x, ctype y) { return x > y; } }; const uint32_t REDUCE_WARP_SIZE = 16, REDUCE_SIZE = WARP_SIZE * 4, REDUCE_SHARD = 64; /*! 
* \brief reduce number of elements satisfying Pred in (N, M) mat to * (N, ceil(M / REDUCE_SIZE)) * * launch config: grid(X, batch), * thread(REDUCE_WARP_SIZE, REDUCE_SHARD) * * Each block computes REDUCE_SHARD outputs */ template <typename ctype, class Pred> static __global__ void kern_reduce_block_cnt(const ctype* input_data, const ctype* input_thresh, uint32_t length, int32_t lda, uint64_t* output, uint32_t output_width) { static const uint32_t BLOCK_DIM_X = REDUCE_WARP_SIZE, BLOCK_DIM_Y = REDUCE_SHARD; uint32_t batch = blockIdx.y, out_col = blockIdx.x * BLOCK_DIM_Y + threadIdx.y, col_begin = out_col * REDUCE_SIZE, col_end = min(col_begin + REDUCE_SIZE, length), tid_local = threadIdx.x; if (out_col >= output_width) { return; } uint32_t thresh = RadixConverter<ctype>::to_radix(input_thresh[batch]); input_data += static_cast<int32_t>(batch) * lda; uint32_t sum_eq = 0, sum_lt = 0; for (uint32_t i = col_begin + tid_local; i < col_end; i += BLOCK_DIM_X) { uint32_t iv = RadixConverter<ctype>::to_radix(input_data[i]); sum_eq += iv == thresh; sum_lt += Pred::cmp(iv, thresh); } #pragma unroll for (uint32_t step = REDUCE_WARP_SIZE / 2; step >= 1; step >>= 1) { sum_eq += __shfl_down(sum_eq, step, REDUCE_WARP_SIZE); sum_lt += __shfl_down(sum_lt, step, REDUCE_WARP_SIZE); } // reduce warp results to a single scalar if (!tid_local) { output[batch * output_width + out_col] = (static_cast<uint64_t>(sum_eq) << 32) | sum_lt; } } static MEGDNN_NOINLINE cudaError_t invoke_cub_scan(const uint64_t* input, uint64_t* output, void* workspace, size_t& workspace_size, uint32_t size, cudaStream_t stream) { return cub::DeviceScan::InclusiveSum(workspace, workspace_size, input, output, size, stream); } static __global__ void kern_init_zero(uint64_t* dst) { dst[0] = 0; } /*! * \brief copy top-k values of each row from input to output * * launch config: grid(X, batch), * thread(WARP_SIZE, COPY_SHARD) */ template <typename ctype, class Pred, int COPY_SHARD> static __global__ void kern_copy(const ctype* input_data, const ctype* input_thresh, const uint64_t* scan, uint32_t scan_width, ctype* output_value, int32_t* output_idx, uint32_t length, uint32_t k, int32_t lda) { #if __cplusplus > 199711L static_assert(REDUCE_SIZE < 256, "local_sum_storage can not be uint8_t"); #endif static const uint32_t BLOCK_DIM_X = WARP_SIZE, BLOCK_DIM_Y = COPY_SHARD; uint32_t scan_col = blockIdx.x * BLOCK_DIM_Y + threadIdx.y; if (scan_col >= scan_width) { return; } uint32_t batch = blockIdx.y, inp_col_begin = min(scan_col * REDUCE_SIZE, length), inp_col_length = min(inp_col_begin + REDUCE_SIZE, length) - inp_col_begin, tid_local = threadIdx.x; uint32_t thresh = RadixConverter<ctype>::to_radix(input_thresh[batch]); input_data += static_cast<int32_t>(batch) * lda + static_cast<int>(inp_col_begin); __shared__ uint8_t local_sum_storage[BLOCK_DIM_Y][2][REDUCE_SIZE + 4]; uint8_t *local_sum_eq = local_sum_storage[threadIdx.y][0], *local_sum_lt = local_sum_storage[threadIdx.y][1]; if (!tid_local) { local_sum_eq[3] = 0; local_sum_lt[3] = 0; } local_sum_eq += 4; local_sum_lt += 4; const uint32_t WORKLOAD = REDUCE_SIZE / WARP_SIZE; #pragma unroll for (uint32_t j = 0; j < WORKLOAD; ++j) { uint32_t i = j * BLOCK_DIM_X + tid_local; if (i < inp_col_length) { uint32_t iv = RadixConverter<ctype>::to_radix(input_data[i]); local_sum_eq[i] = iv == thresh; local_sum_lt[i] = Pred::cmp(iv, thresh); } else { local_sum_eq[i] = 0; local_sum_lt[i] = 0; } } device_scan_packed_accu32<WORKLOAD, uint8_t>(local_sum_eq, tid_local); device_scan_packed_accu32<WORKLOAD, 
uint8_t>(local_sum_lt, tid_local); scan += batch * scan_width; uint64_t scan_prev_pack = scan[static_cast<int>(scan_col) - 1], k_offset_pack = scan_prev_pack - scan[-1], scan_self_pack = scan[scan_col] - scan_prev_pack; #define unpack(name) \ uint32_t name##_eq = u32_from_64_high(name##_pack), \ name##_lt = u32_from_64_low(name##_pack) unpack(k_offset); unpack(scan_self); #undef unpack uint32_t allowed_eq = k - min(k, (u32_from_64_low(scan[scan_width - 1]) - u32_from_64_low(scan[-1]))), ls_lt_max = k - min(k_offset_lt, k), ls_eq_max = allowed_eq - min(allowed_eq, k_offset_eq); if ((scan_self_lt && ls_lt_max) || (scan_self_eq && ls_eq_max)) { #pragma unroll for (uint32_t j = 0; j < WORKLOAD; ++j) { int32_t i = j * BLOCK_DIM_X + tid_local; uint32_t cur_lt = local_sum_lt[i], cur_eq = local_sum_eq[i]; bool is_lt = cur_lt <= ls_lt_max && cur_lt != local_sum_lt[i - 1]; bool is_eq = cur_eq <= ls_eq_max && cur_eq != local_sum_eq[i - 1]; // exactly one should be true if (is_lt || is_eq) { uint32_t off_lt = cur_lt + k_offset_lt - 1; uint32_t off_eq = cur_eq + k_offset_eq - 1 + (k - allowed_eq); uint32_t ocol = is_lt ? off_lt : off_eq; output_value[batch * k + ocol] = input_data[i]; output_idx[batch * k + ocol] = i + inp_col_begin; } } } } //! get workspace for scan, aligned to uint64_t static size_t get_scan_workspace(uint32_t size) { size_t wk = 0; cudaError_t err = invoke_cub_scan(NULL, NULL, NULL, wk, size, NULL); if (err != cudaSuccess) { fprintf(stderr, "topk: cub scan failed: %s (%d)\n", cudaGetErrorString(err), static_cast<int>(err)); megdnn_trap(); } return ((wk - 1) / sizeof(uint64_t) + 1) * sizeof(uint64_t); } } // namespace select } // namespace cuda_topk_impl uint32_t topk::find_kth_radix_workspace(uint32_t batch, uint32_t length, uint32_t grid_dim_y_limit) { using namespace cuda_topk_impl::kth; uint32_t limit = batch > grid_dim_y_limit ? 
grid_dim_y_limit : batch; return (limit * get_grid_dim_x(length) * NR_BUCKET + limit * 2) * sizeof(uint32_t); } template <typename ctype> cudaError_t topk::find_kth_radix(const ctype* input, ctype* output, void* workspace, uint32_t batch, uint32_t length, int32_t lda, int32_t k, uint32_t grid_dim_y_limit, cudaStream_t stream) { using namespace cuda_topk_impl::kth; if (!k) { return cudaErrorUnknown; } if (k < 0) { k = length + k + 1; } if (!(BUCKET_BITS == 8 && (sizeof(ctype) == 4 || sizeof(ctype) == 2))) { // no c++11 in megdnn cuda; so we just trap instead of using static // assert megdnn_trap(); } uint32_t batch_idx = 0; uint32_t grid_dim_x = get_grid_dim_x(length); uint32_t grid_dim_y = 1; while (batch_idx < batch) { if (batch - batch_idx >= grid_dim_y_limit) { grid_dim_y = grid_dim_y_limit; } else { grid_dim_y = batch - batch_idx; } dim3 grid_dim(grid_dim_x, grid_dim_y); uint32_t* dev_k = static_cast<uint32_t*>(workspace); uint32_t* dev_prefix = dev_k + grid_dim_y; uint32_t* bucket_cnt = dev_prefix + grid_dim_y; compute_histogram<ctype, false, 24><<<grid_dim, BLOCK_DIM, 0, stream>>>( input + batch_idx * lda, bucket_cnt, length, lda, nullptr); // use float to make compiler happy; it is not used since last == false update_prefix_and_k<true, false, 24, float> <<<grid_dim_y, NR_BUCKET, 0, stream>>>( bucket_cnt, dev_prefix, dev_k, k, grid_dim_x, nullptr); compute_histogram<ctype, true, 16><<<grid_dim, BLOCK_DIM, 0, stream>>>( input + batch_idx * lda, bucket_cnt, length, lda, dev_prefix); update_prefix_and_k<false, false, 16, float> <<<grid_dim_y, NR_BUCKET, 0, stream>>>( bucket_cnt, dev_prefix, dev_k, k, grid_dim_x, nullptr); compute_histogram<ctype, true, 8><<<grid_dim, BLOCK_DIM, 0, stream>>>( input + batch_idx * lda, bucket_cnt, length, lda, dev_prefix); update_prefix_and_k<false, false, 8, float> <<<grid_dim_y, NR_BUCKET, 0, stream>>>( bucket_cnt, dev_prefix, dev_k, k, grid_dim_x, nullptr); compute_histogram<ctype, true, 0><<<grid_dim, BLOCK_DIM, 0, stream>>>( input + batch_idx * lda, bucket_cnt, length, lda, dev_prefix); update_prefix_and_k<false, true, 0, ctype> <<<grid_dim_y, NR_BUCKET, 0, stream>>>(bucket_cnt, dev_prefix, dev_k, k, grid_dim_x, output + batch_idx); batch_idx += grid_dim_y; } return cudaGetLastError(); } template <typename ctype> cudaError_t topk::topk_select(const ctype* input, const ctype* thresh, ctype* output_value, int32_t* output_idx, void* workspace, uint32_t batch, uint32_t length, int32_t lda, int32_t k, uint32_t batch_upper_limit, cudaStream_t stream) { using namespace cuda_topk_impl; using namespace cuda_topk_impl::select; uint32_t length_split = DIVUP(length, REDUCE_SIZE); void (*kptr_reduce_block_cnt)(const ctype*, const ctype*, uint32_t, int32_t, uint64_t*, uint32_t); void (*kptr_copy)(const ctype*, const ctype*, const uint64_t*, uint32_t, ctype*, int32_t*, uint32_t, uint32_t, int32_t); int kern_copy_shard; { int grid, block; cudaError_t err = cudaOccupancyMaxPotentialBlockSize( &grid, &block, kern_copy<ctype, GreaterPred, 32>); if (err) { return err; } kern_copy_shard = block / (WARP_SIZE * 8) * 8; if (!kern_copy_shard) { fprintf(stderr, "topk: failed to launch: block=%d\n", block); return cudaErrorLaunchOutOfResources; } } #define CASE_SHARD_ON(pred, n) \ case n: \ kptr_copy = kern_copy<ctype, pred, n>; \ break #define CASE_SHARD(pred) \ switch (kern_copy_shard) { \ CASE_SHARD_ON(pred, 8); \ CASE_SHARD_ON(pred, 16); \ CASE_SHARD_ON(pred, 24); \ CASE_SHARD_ON(pred, 32); \ default: \ fprintf(stderr, "topk: failed to launch: shard=%d\n", \ 
kern_copy_shard); \ return cudaErrorLaunchOutOfResources; \ } if (k < 0) { k = -k; kptr_reduce_block_cnt = kern_reduce_block_cnt<ctype, GreaterPred>; CASE_SHARD(GreaterPred); } else { kptr_reduce_block_cnt = kern_reduce_block_cnt<ctype, LessPred>; CASE_SHARD(LessPred); } #undef CASE_SHARD #undef CASE_SHARD_ON uint32_t batch_idx = 0; uint32_t batch_real = 1; while (batch_idx < batch) { if (batch - batch_idx >= batch_upper_limit) { batch_real = batch_upper_limit; } else { batch_real = batch - batch_idx; } size_t scan_size = batch_real * length_split; size_t scan_wk = get_scan_workspace(scan_size); uint64_t *scan_inp = static_cast<uint64_t*>(workspace) + scan_wk / sizeof(uint64_t), *scan_out = scan_inp + scan_size; // reduce to scan_inp kptr_reduce_block_cnt<<< dim3(DIVUP(length_split, REDUCE_SHARD), batch_real), dim3(REDUCE_WARP_SIZE, REDUCE_SHARD), 0, stream>>>( input + batch_idx * lda, thresh + batch_idx, length, lda, scan_inp, length_split); // scan to scan_out scan_out += 1; // set scan[-1] to 0 cudaError_t err = invoke_cub_scan(scan_inp, scan_out, workspace, scan_wk, scan_size, stream); if (err != cudaSuccess) { return err; } kern_init_zero<<<1, 1, 0, stream>>>(scan_out - 1); // copy result kptr_copy<<<dim3(DIVUP(length_split, kern_copy_shard), batch_real), dim3(WARP_SIZE, kern_copy_shard), 0, stream>>>( input + batch_idx * lda, thresh + batch_idx, scan_out, length_split, output_value + std::abs(k) * batch_idx, output_idx + std::abs(k) * batch_idx, length, k, lda); batch_idx += batch_real; } return cudaGetLastError(); } uint32_t topk::topk_select_workspace(uint32_t batch, uint32_t length) { using namespace cuda_topk_impl::select; size_t scan_size = batch * DIVUP(length, REDUCE_SIZE); return get_scan_workspace(scan_size) + sizeof(uint64_t) * (scan_size * 2 + 1); } namespace megdnn { namespace cuda { namespace topk { #define INST(t) \ template cudaError_t find_kth_radix<t>(const t*, t*, void*, uint32_t, \ uint32_t, int32_t, int32_t, \ uint32_t, cudaStream_t); \ template cudaError_t topk_select<t>(const t*, const t*, t*, int32_t*, \ void*, uint32_t, uint32_t, int32_t, \ int32_t, uint32_t, cudaStream_t) INST(float); INST(int32_t); DNN_INC_FLOAT16(INST(dt_float16)); #undef INST } // namespace topk } // namespace cuda } // namespace megdnn // vim: ft=cuda syntax=cuda.doxygen
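// A rough host-side sketch (illustration only, not megdnn's real calling convention,
// which lives in the operator layer) of how the two entry points defined above can be
// composed: find_kth_radix computes the k-th value of every row into d_kth, and
// topk_select then gathers the |k| selected values and indices per row into d_val and
// d_idx.  The buffer names, the limit of 512 and the error handling are assumptions.
#include <algorithm>
#include <cstdint>
using namespace megdnn::cuda;
static cudaError_t topk_rows_sketch(const float* d_in, float* d_kth, float* d_val,
                                    int32_t* d_idx, uint32_t batch, uint32_t length,
                                    int32_t k, cudaStream_t stream) {
    const uint32_t limit = 512;  // assumed grid_dim_y / batch upper limit
    size_t wk = std::max(topk::find_kth_radix_workspace(batch, length, limit),
                         topk::topk_select_workspace(batch, length));
    void* d_wk = nullptr;
    cudaError_t err = cudaMalloc(&d_wk, wk);
    if (err != cudaSuccess)
        return err;
    // per-row threshold (k-th value of each row)
    err = topk::find_kth_radix<float>(d_in, d_kth, d_wk, batch, length,
                                      length /* lda */, k, limit, stream);
    if (err == cudaSuccess) {
        // gather values/indices on the threshold side of each row
        err = topk::topk_select<float>(d_in, d_kth, d_val, d_idx, d_wk, batch, length,
                                       length /* lda */, k, limit, stream);
    }
    cudaFree(d_wk);
    return err;
}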
b71e64f36aa75d3e04a586ef7f802662d67ce2e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /************************************************************************* * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ************************************************************************/ #include <algorithm> #include <cassert> #include "core.h" #include "common_kernel.h" #include "copy_kernel.h" #include "enqueue.h" /* HIERARCHY * * The data is split into CHUNKS, and each CHUNK is split into NUM_SUBCHUNKS * SUBCHUNKS, where each SUBCHUNK is processed independently. A SUBCHUNK is * split into numUnroll UNROLLS and each thread performs UNROLL_COUNT * single-data-element operations inside an UNROLL. As the name suggests, the * UNROLL_COUNT operations within an UNROLL are unrolled. */ // Number of threads used to perform copies, etc. Must be multiple of 32. // An additional thread is used to handle threadfences, so the CUDA blocks // have dimension NUM_THREADS+1. #define NUM_THREADS 256 // Each thread unrolls the innermost loop of the copy or reduction operations // to this many single-data-element instructions #define UNROLL_COUNT 8 #define UNROLL_SIZE (UNROLL_COUNT * NUM_THREADS) // To hide the latency associated with the synchronization between different // subchunks, we interleave the independent subchunks so that more data can be // transferred while the sync is in progress. This is the number of subchunks // that are active at the same time #define NUM_SUBCHUNKS 2 // If this is called with STEP, it means that we just finished processing the // data for step STEP on this GPU, which is the data required on the next GPU // for step STEP + 1, so we signal the next GPU that its data for step STEP + 1 // is available. This is called by one particular consumer warp and so we select // the first thread in the warp to set the flag. 
#define SIGNAL_NEW_DATA_AVAILABLE(chunk, subchunk, step) \ do { \ __threadfence_system(); \ args.NextNewDataAvailableFlag[0] = \ NUM_SUBCHUNKS*((chunk) * (args.NumGPUs - 1) + (step)) + subchunk+1; \ } while (0) // This is called by all producer threads, but only thread 0 spins on the flag, #define WAIT_FOR_NEW_DATA(chunk, subchunk, step) \ do { \ if (tid == 0) { \ Wait([=] { \ return ((volatile int *)args.ThisNewDataAvailableFlag)[0] >= \ NUM_SUBCHUNKS*((chunk) * (args.NumGPUs - 1) + (step)) \ + subchunk + 1 - NUM_SUBCHUNKS; \ }); \ } \ BAR(sync, 1, NUM_THREADS); \ } while (0) #define SIGNAL_CHUNK_DONE(chunk, subchunk) \ do { \ __threadfence_system(); \ args.PrevChunkDoneFlag[0] = NUM_SUBCHUNKS*(chunk) + (subchunk) + 1; \ } while (0) #define WAIT_FOR_PREV_CHUNK(chunk, subchunk) \ do { \ if (tid == 0) { \ Wait([=] { \ return ((volatile int*)args.ThisChunkDoneFlag)[0] >= \ NUM_SUBCHUNKS*(chunk) + subchunk + 1-NUM_SUBCHUNKS; \ }); \ } \ BAR(sync, 1, NUM_THREADS); \ } while (0) __device__ inline void getSliceSizeAndChunkSize(int *sliceSize, int slice, int numSlices, int numBigSlices, int numSmallSlices, int bigSliceN, int smallSliceN, int lastSliceN) { if (slice < numBigSlices) { *sliceSize = bigSliceN; } else { *sliceSize = (slice < numBigSlices + numSmallSlices) ? smallSliceN : ((slice == numSlices - 1) ? lastSliceN : 0); } } template<typename T> struct AllGatherKernelArgs { // general parameters int ThisId; int NumGPUs; int N; int * UserFromRing; // some pre-computed sizes int SliceSize; int ChunkSize; int NumChunks; int BufferSliceStride; int BufferMisalignedN; T ** ThisPtrToNextOutput; T ** PrevPtrToThisOutput; // local and remote input, output, and buffer const T * __restrict__ ThisInput; volatile T * __restrict__ ThisOutput; volatile T * __restrict__ ThisBuffer; volatile T * __restrict__ NextBuffer; // local and remote flags volatile int * __restrict__ ThisNewDataAvailableFlag; volatile int * __restrict__ NextNewDataAvailableFlag; volatile int * __restrict__ ThisChunkDoneFlag; volatile int * __restrict__ PrevChunkDoneFlag; }; __device__ inline int GetBlock(const int index, const int step, const int * const userFromRing, const int numGPUs) { return userFromRing[(numGPUs + index - step) % numGPUs]; } __shared__ volatile void * nextOutput; template<int THREADS, int UNROLL, bool PUSHRECV, typename T> __global__ void AllGatherKernel(const AllGatherKernelArgs<T> args) { if (args.N == 0) return; int tid = threadIdx.x; // First wait for args.PrevPtrToThisOutput to become nullptr to ensure that // the previous GPU is done with a previous collective operation. if (tid == 0) { Wait([=] { return *((T * volatile *)args.PrevPtrToThisOutput) == nullptr; }); *((T * volatile *)args.PrevPtrToThisOutput) = (T*)args.ThisOutput; Wait([=] { return *((T * volatile *)args.ThisPtrToNextOutput) != nullptr; }); if(PUSHRECV) nextOutput = *((volatile void * volatile *)args.ThisPtrToNextOutput); } __syncthreads(); for (int chunk = 0; chunk < args.NumChunks; ++chunk) { // calculate slice size. for all chunks except (possibly) the last one, // this will just be args.SliceSize. 
For the last one, it may be smaller int bigSliceN = args.SliceSize; int smallSliceN = 0; int lastSliceN = 0; int numSlices = NUM_SUBCHUNKS; int numBigSlices = numSlices; int numSmallSlices = 0; // last chunk if ((chunk + 1 == args.NumChunks) && (args.N % args.ChunkSize > 0)) CalcLastChunk<THREADS, UNROLL, T>(&bigSliceN, &smallSliceN, &lastSliceN, &numSlices, &numBigSlices, &numSmallSlices, args.N, args.NumChunks, args.ChunkSize); // this offset is only applied to Data pointers, not to Buffer pointers, // since we only have one buffer per chunk int chunkOffset = chunk * args.ChunkSize; // step 0: copy the resident block from the ThisInput to ThisOutput and also // to NextOutput int step = 0; int block = GetBlock(args.ThisId, step, args.UserFromRing, args.NumGPUs); int outputOffset = chunkOffset + block * args.N; int inputOffset = chunkOffset; int bufferOffset; int sliceSize; if (!PUSHRECV) { bufferOffset = block * NUM_SUBCHUNKS * args.BufferSliceStride + block * args.BufferMisalignedN; } // Copy from ThisInput if (tid < THREADS) { for(int s=0; s<NUM_SUBCHUNKS; ++s) { getSliceSizeAndChunkSize(&sliceSize, s, numSlices, numBigSlices, numSmallSlices, bigSliceN, smallSliceN, lastSliceN); if (!PUSHRECV) WAIT_FOR_PREV_CHUNK(chunk, s); if (PUSHRECV) { DoubleCopy<UNROLL, THREADS>( args.ThisOutput + outputOffset, (volatile T *)nextOutput + outputOffset, args.ThisInput + inputOffset, sliceSize); } else { DoubleCopy<UNROLL, THREADS>( args.ThisOutput + outputOffset, args.NextBuffer + bufferOffset, args.ThisInput + inputOffset, sliceSize); } __syncthreads(); outputOffset += sliceSize; inputOffset += sliceSize; if (!PUSHRECV) bufferOffset += sliceSize; } } else { for(int s=0; s<NUM_SUBCHUNKS; ++s) { __syncthreads(); SIGNAL_NEW_DATA_AVAILABLE(chunk, s, step); } } // steps j with 0 < j < k - 1: // copy a block that was pushed to this GPU to the next GPU for (step = 1; step < args.NumGPUs - 1; ++step) { block = GetBlock(args.ThisId, step, args.UserFromRing, args.NumGPUs); outputOffset = chunkOffset + block * args.N; if (!PUSHRECV) { bufferOffset = block * NUM_SUBCHUNKS * args.BufferSliceStride + block * args.BufferMisalignedN; } if (tid < THREADS) { for(int s=0; s<NUM_SUBCHUNKS; ++s) { getSliceSizeAndChunkSize(&sliceSize, s, numSlices, numBigSlices, numSmallSlices, bigSliceN, smallSliceN, lastSliceN); WAIT_FOR_NEW_DATA(chunk, s, step); if (PUSHRECV) { Copy<UNROLL, THREADS>( (volatile T *)nextOutput + outputOffset, args.ThisOutput + outputOffset, sliceSize); } else { DoubleCopy<UNROLL, THREADS>( args.NextBuffer + bufferOffset, args.ThisOutput + outputOffset, args.ThisBuffer + bufferOffset, sliceSize); } __syncthreads(); outputOffset += sliceSize; if (!PUSHRECV) bufferOffset += sliceSize; } } else { for(int s=0; s<NUM_SUBCHUNKS; ++s) { __syncthreads(); SIGNAL_NEW_DATA_AVAILABLE(chunk, s, step); } } } if (!PUSHRECV) { step = args.NumGPUs - 1; block = GetBlock(args.ThisId, step, args.UserFromRing, args.NumGPUs); outputOffset = chunkOffset + block * args.N; bufferOffset = block * NUM_SUBCHUNKS * args.BufferSliceStride + block * args.BufferMisalignedN; // Make final copy from buffer to dest. 
if (tid < THREADS) { for(int s=0; s<NUM_SUBCHUNKS; ++s) { getSliceSizeAndChunkSize(&sliceSize, s, numSlices, numBigSlices, numSmallSlices, bigSliceN, smallSliceN, lastSliceN); WAIT_FOR_NEW_DATA(chunk, s, step); Copy<UNROLL, THREADS>( args.ThisOutput + outputOffset, args.ThisBuffer + bufferOffset, sliceSize); __syncthreads(); outputOffset += sliceSize; bufferOffset += sliceSize; } } else { for(int s=0; s<NUM_SUBCHUNKS; ++s) { __syncthreads(); SIGNAL_CHUNK_DONE(chunk, s); } } } } // wait for the last data to be pushed to us if (tid < THREADS) { if (PUSHRECV) WAIT_FOR_NEW_DATA(args.NumChunks, NUM_SUBCHUNKS-1, 0); else WAIT_FOR_PREV_CHUNK(args.NumChunks, NUM_SUBCHUNKS-1); if (tid == 0) { args.ThisNewDataAvailableFlag[0] = 0; args.ThisChunkDoneFlag[0] = 0; *args.ThisPtrToNextOutput = nullptr; } } } template<typename T> ncclResult_t ncclAllGatherWithType(const void* sendbuff, void* recvbuff, int count, ncclComm* comm, int numUnroll, hipStream_t stream) { if (count == 0) return ncclSuccess; int index = comm->ncclId; int blockSizeInBytes = count * sizeof(T); int misalignedBytes = blockSizeInBytes % alignof(uint64_t); assert((int)((misalignedBytes / sizeof(T)) * sizeof(T)) == misalignedBytes); int misalignedN = misalignedBytes / sizeof(T); assert(misalignedN < (int)(sizeof(uint64_t) / sizeof(T))); int paddingN = (misalignedN > 0) ? sizeof(uint64_t) / sizeof(T) : 0; // There is one slice per GPU, so a slice can be at most bufferN / numGPUs, // where bufferN is the number of elements of type T that fit into the buffer. int bufferN = comm->buffSize / sizeof(T); // we only need buffer for k slices and k paddings int bufferNPerSlice = (bufferN - comm->nDev * NUM_SUBCHUNKS * paddingN) / (comm->nDev * NUM_SUBCHUNKS); // For efficiency, we want the slice size to be a multiple of UNROLL_SIZE int maxSliceSize = (bufferNPerSlice / UNROLL_SIZE) * UNROLL_SIZE; int nextId = (index + 1) % comm->nDev; int prevId = (index + comm->nDev - 1) % comm->nDev; AllGatherKernelArgs<T> args; args.ThisId = index; args.NumGPUs = comm->nDev; args.N = count; /* Block j is coming from sendbuff[j], which lives on device with logical * index comm->ringFromUser[j]. But the block ordering does not necessarily * follow the ring ordering. Hence the order in which a particular GPU * processes the different blocks (the correspondence between the step in * the reduction algorithm and the block on which a GPU operates in that * particular step) is not the same as the ring order. * * Say we have 4 GPUs and comm->userFromRing = { 1, 2, 0, 3 }. Then there are 3 * step in the all-gather algorithm and block 0 comes from device 2, block 1 * from 0, block 2 from device 1, and block 3 comes from device 3. In the * first step of the algorithm, each GPU must copy its own block from its * sendbuff to the appropriate location in its recvbuff. The blocks that a * GPU has to process in the next steps is determined by the previous step * because each GPU only hands off data to the next GPU in the ring. * * In the above example, we get the following table of which block is * processed by each GPU in a given step. The columns correspond to the * different GPUs while the rows are the steps in the algorithm. * * GPU 0 1 2 3 * step * 0 1 2 0 3 * 1 3 1 2 0 * 2 0 3 1 2 * * We note the the rows in the above table are just comm->userFromRing in the * first step and the list is cyclicly permuted to the right for each next * step. 
The columns, which are what the individual GPUs need to know, are * comm->userFromRing traversed backwards and starting at index k for GPU k. * These columns are what we put into args.BlockVsStep to tell the GPU which * block it needs to be processing at a particular step. */ args.UserFromRing = comm->devUserFromRing; args.SliceSize = numUnroll * UNROLL_SIZE * sizeof(PackType) / sizeof(T); args.SliceSize = ::min(maxSliceSize, args.SliceSize); args.ChunkSize = NUM_SUBCHUNKS * args.SliceSize; // don't reduce this if we cut the slice size in half below, because if that // happens, the last chunk will be larger than the other chunks, and we will // need the extra buffer space args.BufferSliceStride = args.SliceSize + paddingN; args.BufferMisalignedN = misalignedN; // avoid a case where we have one or more big chunks and one tiny one int remainder = args.N % args.ChunkSize; if ((args.N > args.ChunkSize) && (remainder > 0) && (args.N < 5 * args.ChunkSize) && (2 * remainder < args.ChunkSize)) { args.SliceSize /= 2; args.ChunkSize = NUM_SUBCHUNKS * args.SliceSize; // round down so we end up with a big last chunk args.NumChunks = args.N / args.ChunkSize; } else { // round up args.NumChunks = (args.N + args.ChunkSize - 1) / args.ChunkSize; } args.ThisPtrToNextOutput = (T**)&(comm->ptrs[nextId].local->recvPtrs[0]); args.PrevPtrToThisOutput = (T**)&(comm->ptrs[prevId].remote->recvPtrs[0]); args.ThisInput = (const T*)sendbuff; args.ThisOutput = (volatile T*)recvbuff; args.ThisBuffer = (volatile T*)comm->ptrs[prevId].local->buff; args.NextBuffer = (volatile T*)comm->ptrs[nextId].remote->buff; args.ThisNewDataAvailableFlag = comm->ptrs[prevId].local->flags; args.NextNewDataAvailableFlag = comm->ptrs[nextId].remote->flags; args.ThisChunkDoneFlag = comm->ptrs[nextId].local->flags + 1; args.PrevChunkDoneFlag = comm->ptrs[prevId].remote->flags + 1; if( comm->useRemoteRecv ) { hipLaunchKernelGGL(( AllGatherKernel<NUM_THREADS, UNROLL_COUNT, true, T>) , dim3(1), dim3(NUM_THREADS + 1), 0, stream, args); } else { hipLaunchKernelGGL(( AllGatherKernel<NUM_THREADS, UNROLL_COUNT, false, T>) , dim3(1), dim3(NUM_THREADS + 1), 0, stream, args); } return ncclSuccess; } class AllGatherFunctor { public: ncclResult_t operator()(const void* sendbuff, void* recvbuff, int count, ncclDataType_t datatype, ncclRedOp_t /*dummy operation*/, int /*dummy root*/, ncclComm* comm, hipStream_t stream) { int numUnroll = 16; // this is optimal on dt07 with 4 GPUs switch (datatype) { case ncclChar: return ncclAllGatherWithType<char>(sendbuff, recvbuff, count, comm, numUnroll, stream); case ncclInt: return ncclAllGatherWithType<int>(sendbuff, recvbuff, count, comm, numUnroll, stream); #if CUDART_VERSION >= 7050 case ncclHalf: return ncclAllGatherWithType<half>(sendbuff, recvbuff, count, comm, numUnroll, stream); #endif case ncclFloat: return ncclAllGatherWithType<float>(sendbuff, recvbuff, count, comm, numUnroll, stream); case ncclDouble: return ncclAllGatherWithType<double>(sendbuff, recvbuff, count, comm, numUnroll, stream); case ncclInt64: return ncclAllGatherWithType<long long>(sendbuff, recvbuff, count, comm, numUnroll, stream); case ncclUint64: return ncclAllGatherWithType<unsigned long long>(sendbuff, recvbuff, count, comm, numUnroll, stream); } return ncclInvalidType; } }; extern "C" DSOGLOBAL ncclResult_t ncclAllGather(const void* sendbuff, int count, ncclDataType_t datatype, void* recvbuff, ncclComm_t comm, hipStream_t stream) { return enqueue(AllGatherFunctor(), sendbuff, recvbuff, count, datatype, ncclSum, 0, comm, stream); }
b71e64f36aa75d3e04a586ef7f802662d67ce2e1.cu
/************************************************************************* * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ************************************************************************/ #include <algorithm> #include <cassert> #include "core.h" #include "common_kernel.h" #include "copy_kernel.h" #include "enqueue.h" /* HIERARCHY * * The data is split into CHUNKS, and each CHUNK is split into NUM_SUBCHUNKS * SUBCHUNKS, where each SUBCHUNK is processed independently. A SUBCHUNK is * split into numUnroll UNROLLS and each thread performs UNROLL_COUNT * single-data-element operations inside an UNROLL. As the name suggests, the * UNROLL_COUNT operations within an UNROLL are unrolled. */ // Number of threads used to perform copies, etc. Must be multiple of 32. // An additional thread is used to handle threadfences, so the CUDA blocks // have dimension NUM_THREADS+1. #define NUM_THREADS 256 // Each thread unrolls the innermost loop of the copy or reduction operations // to this many single-data-element instructions #define UNROLL_COUNT 8 #define UNROLL_SIZE (UNROLL_COUNT * NUM_THREADS) // To hide the latency associated with the synchronization between different // subchunks, we interleave the independent subchunks so that more data can be // transferred while the sync is in progress. This is the number of subchunks // that are active at the same time #define NUM_SUBCHUNKS 2 // If this is called with STEP, it means that we just finished processing the // data for step STEP on this GPU, which is the data required on the next GPU // for step STEP + 1, so we signal the next GPU that its data for step STEP + 1 // is available. This is called by one particular consumer warp and so we select // the first thread in the warp to set the flag. 
#define SIGNAL_NEW_DATA_AVAILABLE(chunk, subchunk, step) \ do { \ __threadfence_system(); \ args.NextNewDataAvailableFlag[0] = \ NUM_SUBCHUNKS*((chunk) * (args.NumGPUs - 1) + (step)) + subchunk+1; \ } while (0) // This is called by all producer threads, but only thread 0 spins on the flag, #define WAIT_FOR_NEW_DATA(chunk, subchunk, step) \ do { \ if (tid == 0) { \ Wait([=] { \ return ((volatile int *)args.ThisNewDataAvailableFlag)[0] >= \ NUM_SUBCHUNKS*((chunk) * (args.NumGPUs - 1) + (step)) \ + subchunk + 1 - NUM_SUBCHUNKS; \ }); \ } \ BAR(sync, 1, NUM_THREADS); \ } while (0) #define SIGNAL_CHUNK_DONE(chunk, subchunk) \ do { \ __threadfence_system(); \ args.PrevChunkDoneFlag[0] = NUM_SUBCHUNKS*(chunk) + (subchunk) + 1; \ } while (0) #define WAIT_FOR_PREV_CHUNK(chunk, subchunk) \ do { \ if (tid == 0) { \ Wait([=] { \ return ((volatile int*)args.ThisChunkDoneFlag)[0] >= \ NUM_SUBCHUNKS*(chunk) + subchunk + 1-NUM_SUBCHUNKS; \ }); \ } \ BAR(sync, 1, NUM_THREADS); \ } while (0) __device__ inline void getSliceSizeAndChunkSize(int *sliceSize, int slice, int numSlices, int numBigSlices, int numSmallSlices, int bigSliceN, int smallSliceN, int lastSliceN) { if (slice < numBigSlices) { *sliceSize = bigSliceN; } else { *sliceSize = (slice < numBigSlices + numSmallSlices) ? smallSliceN : ((slice == numSlices - 1) ? lastSliceN : 0); } } template<typename T> struct AllGatherKernelArgs { // general parameters int ThisId; int NumGPUs; int N; int * UserFromRing; // some pre-computed sizes int SliceSize; int ChunkSize; int NumChunks; int BufferSliceStride; int BufferMisalignedN; T ** ThisPtrToNextOutput; T ** PrevPtrToThisOutput; // local and remote input, output, and buffer const T * __restrict__ ThisInput; volatile T * __restrict__ ThisOutput; volatile T * __restrict__ ThisBuffer; volatile T * __restrict__ NextBuffer; // local and remote flags volatile int * __restrict__ ThisNewDataAvailableFlag; volatile int * __restrict__ NextNewDataAvailableFlag; volatile int * __restrict__ ThisChunkDoneFlag; volatile int * __restrict__ PrevChunkDoneFlag; }; __device__ inline int GetBlock(const int index, const int step, const int * const userFromRing, const int numGPUs) { return userFromRing[(numGPUs + index - step) % numGPUs]; } __shared__ volatile void * nextOutput; template<int THREADS, int UNROLL, bool PUSHRECV, typename T> __global__ void AllGatherKernel(const AllGatherKernelArgs<T> args) { if (args.N == 0) return; int tid = threadIdx.x; // First wait for args.PrevPtrToThisOutput to become nullptr to ensure that // the previous GPU is done with a previous collective operation. if (tid == 0) { Wait([=] { return *((T * volatile *)args.PrevPtrToThisOutput) == nullptr; }); *((T * volatile *)args.PrevPtrToThisOutput) = (T*)args.ThisOutput; Wait([=] { return *((T * volatile *)args.ThisPtrToNextOutput) != nullptr; }); if(PUSHRECV) nextOutput = *((volatile void * volatile *)args.ThisPtrToNextOutput); } __syncthreads(); for (int chunk = 0; chunk < args.NumChunks; ++chunk) { // calculate slice size. for all chunks except (possibly) the last one, // this will just be args.SliceSize. 
For the last one, it may be smaller int bigSliceN = args.SliceSize; int smallSliceN = 0; int lastSliceN = 0; int numSlices = NUM_SUBCHUNKS; int numBigSlices = numSlices; int numSmallSlices = 0; // last chunk if ((chunk + 1 == args.NumChunks) && (args.N % args.ChunkSize > 0)) CalcLastChunk<THREADS, UNROLL, T>(&bigSliceN, &smallSliceN, &lastSliceN, &numSlices, &numBigSlices, &numSmallSlices, args.N, args.NumChunks, args.ChunkSize); // this offset is only applied to Data pointers, not to Buffer pointers, // since we only have one buffer per chunk int chunkOffset = chunk * args.ChunkSize; // step 0: copy the resident block from the ThisInput to ThisOutput and also // to NextOutput int step = 0; int block = GetBlock(args.ThisId, step, args.UserFromRing, args.NumGPUs); int outputOffset = chunkOffset + block * args.N; int inputOffset = chunkOffset; int bufferOffset; int sliceSize; if (!PUSHRECV) { bufferOffset = block * NUM_SUBCHUNKS * args.BufferSliceStride + block * args.BufferMisalignedN; } // Copy from ThisInput if (tid < THREADS) { for(int s=0; s<NUM_SUBCHUNKS; ++s) { getSliceSizeAndChunkSize(&sliceSize, s, numSlices, numBigSlices, numSmallSlices, bigSliceN, smallSliceN, lastSliceN); if (!PUSHRECV) WAIT_FOR_PREV_CHUNK(chunk, s); if (PUSHRECV) { DoubleCopy<UNROLL, THREADS>( args.ThisOutput + outputOffset, (volatile T *)nextOutput + outputOffset, args.ThisInput + inputOffset, sliceSize); } else { DoubleCopy<UNROLL, THREADS>( args.ThisOutput + outputOffset, args.NextBuffer + bufferOffset, args.ThisInput + inputOffset, sliceSize); } __syncthreads(); outputOffset += sliceSize; inputOffset += sliceSize; if (!PUSHRECV) bufferOffset += sliceSize; } } else { for(int s=0; s<NUM_SUBCHUNKS; ++s) { __syncthreads(); SIGNAL_NEW_DATA_AVAILABLE(chunk, s, step); } } // steps j with 0 < j < k - 1: // copy a block that was pushed to this GPU to the next GPU for (step = 1; step < args.NumGPUs - 1; ++step) { block = GetBlock(args.ThisId, step, args.UserFromRing, args.NumGPUs); outputOffset = chunkOffset + block * args.N; if (!PUSHRECV) { bufferOffset = block * NUM_SUBCHUNKS * args.BufferSliceStride + block * args.BufferMisalignedN; } if (tid < THREADS) { for(int s=0; s<NUM_SUBCHUNKS; ++s) { getSliceSizeAndChunkSize(&sliceSize, s, numSlices, numBigSlices, numSmallSlices, bigSliceN, smallSliceN, lastSliceN); WAIT_FOR_NEW_DATA(chunk, s, step); if (PUSHRECV) { Copy<UNROLL, THREADS>( (volatile T *)nextOutput + outputOffset, args.ThisOutput + outputOffset, sliceSize); } else { DoubleCopy<UNROLL, THREADS>( args.NextBuffer + bufferOffset, args.ThisOutput + outputOffset, args.ThisBuffer + bufferOffset, sliceSize); } __syncthreads(); outputOffset += sliceSize; if (!PUSHRECV) bufferOffset += sliceSize; } } else { for(int s=0; s<NUM_SUBCHUNKS; ++s) { __syncthreads(); SIGNAL_NEW_DATA_AVAILABLE(chunk, s, step); } } } if (!PUSHRECV) { step = args.NumGPUs - 1; block = GetBlock(args.ThisId, step, args.UserFromRing, args.NumGPUs); outputOffset = chunkOffset + block * args.N; bufferOffset = block * NUM_SUBCHUNKS * args.BufferSliceStride + block * args.BufferMisalignedN; // Make final copy from buffer to dest. 
if (tid < THREADS) { for(int s=0; s<NUM_SUBCHUNKS; ++s) { getSliceSizeAndChunkSize(&sliceSize, s, numSlices, numBigSlices, numSmallSlices, bigSliceN, smallSliceN, lastSliceN); WAIT_FOR_NEW_DATA(chunk, s, step); Copy<UNROLL, THREADS>( args.ThisOutput + outputOffset, args.ThisBuffer + bufferOffset, sliceSize); __syncthreads(); outputOffset += sliceSize; bufferOffset += sliceSize; } } else { for(int s=0; s<NUM_SUBCHUNKS; ++s) { __syncthreads(); SIGNAL_CHUNK_DONE(chunk, s); } } } } // wait for the last data to be pushed to us if (tid < THREADS) { if (PUSHRECV) WAIT_FOR_NEW_DATA(args.NumChunks, NUM_SUBCHUNKS-1, 0); else WAIT_FOR_PREV_CHUNK(args.NumChunks, NUM_SUBCHUNKS-1); if (tid == 0) { args.ThisNewDataAvailableFlag[0] = 0; args.ThisChunkDoneFlag[0] = 0; *args.ThisPtrToNextOutput = nullptr; } } } template<typename T> ncclResult_t ncclAllGatherWithType(const void* sendbuff, void* recvbuff, int count, ncclComm* comm, int numUnroll, cudaStream_t stream) { if (count == 0) return ncclSuccess; int index = comm->ncclId; int blockSizeInBytes = count * sizeof(T); int misalignedBytes = blockSizeInBytes % alignof(uint64_t); assert((int)((misalignedBytes / sizeof(T)) * sizeof(T)) == misalignedBytes); int misalignedN = misalignedBytes / sizeof(T); assert(misalignedN < (int)(sizeof(uint64_t) / sizeof(T))); int paddingN = (misalignedN > 0) ? sizeof(uint64_t) / sizeof(T) : 0; // There is one slice per GPU, so a slice can be at most bufferN / numGPUs, // where bufferN is the number of elements of type T that fit into the buffer. int bufferN = comm->buffSize / sizeof(T); // we only need buffer for k slices and k paddings int bufferNPerSlice = (bufferN - comm->nDev * NUM_SUBCHUNKS * paddingN) / (comm->nDev * NUM_SUBCHUNKS); // For efficiency, we want the slice size to be a multiple of UNROLL_SIZE int maxSliceSize = (bufferNPerSlice / UNROLL_SIZE) * UNROLL_SIZE; int nextId = (index + 1) % comm->nDev; int prevId = (index + comm->nDev - 1) % comm->nDev; AllGatherKernelArgs<T> args; args.ThisId = index; args.NumGPUs = comm->nDev; args.N = count; /* Block j is coming from sendbuff[j], which lives on device with logical * index comm->ringFromUser[j]. But the block ordering does not necessarily * follow the ring ordering. Hence the order in which a particular GPU * processes the different blocks (the correspondence between the step in * the reduction algorithm and the block on which a GPU operates in that * particular step) is not the same as the ring order. * * Say we have 4 GPUs and comm->userFromRing = { 1, 2, 0, 3 }. Then there are 3 * step in the all-gather algorithm and block 0 comes from device 2, block 1 * from 0, block 2 from device 1, and block 3 comes from device 3. In the * first step of the algorithm, each GPU must copy its own block from its * sendbuff to the appropriate location in its recvbuff. The blocks that a * GPU has to process in the next steps is determined by the previous step * because each GPU only hands off data to the next GPU in the ring. * * In the above example, we get the following table of which block is * processed by each GPU in a given step. The columns correspond to the * different GPUs while the rows are the steps in the algorithm. * * GPU 0 1 2 3 * step * 0 1 2 0 3 * 1 3 1 2 0 * 2 0 3 1 2 * * We note the the rows in the above table are just comm->userFromRing in the * first step and the list is cyclicly permuted to the right for each next * step. 
The columns, which are what the individual GPUs need to know, are * comm->userFromRing traversed backwards and starting at index k for GPU k. * These columns are what we put into args.BlockVsStep to tell the GPU which * block it needs to be processing at a particular step. */ args.UserFromRing = comm->devUserFromRing; args.SliceSize = numUnroll * UNROLL_SIZE * sizeof(PackType) / sizeof(T); args.SliceSize = std::min(maxSliceSize, args.SliceSize); args.ChunkSize = NUM_SUBCHUNKS * args.SliceSize; // don't reduce this if we cut the slice size in half below, because if that // happens, the last chunk will be larger than the other chunks, and we will // need the extra buffer space args.BufferSliceStride = args.SliceSize + paddingN; args.BufferMisalignedN = misalignedN; // avoid a case where we have one or more big chunks and one tiny one int remainder = args.N % args.ChunkSize; if ((args.N > args.ChunkSize) && (remainder > 0) && (args.N < 5 * args.ChunkSize) && (2 * remainder < args.ChunkSize)) { args.SliceSize /= 2; args.ChunkSize = NUM_SUBCHUNKS * args.SliceSize; // round down so we end up with a big last chunk args.NumChunks = args.N / args.ChunkSize; } else { // round up args.NumChunks = (args.N + args.ChunkSize - 1) / args.ChunkSize; } args.ThisPtrToNextOutput = (T**)&(comm->ptrs[nextId].local->recvPtrs[0]); args.PrevPtrToThisOutput = (T**)&(comm->ptrs[prevId].remote->recvPtrs[0]); args.ThisInput = (const T*)sendbuff; args.ThisOutput = (volatile T*)recvbuff; args.ThisBuffer = (volatile T*)comm->ptrs[prevId].local->buff; args.NextBuffer = (volatile T*)comm->ptrs[nextId].remote->buff; args.ThisNewDataAvailableFlag = comm->ptrs[prevId].local->flags; args.NextNewDataAvailableFlag = comm->ptrs[nextId].remote->flags; args.ThisChunkDoneFlag = comm->ptrs[nextId].local->flags + 1; args.PrevChunkDoneFlag = comm->ptrs[prevId].remote->flags + 1; if( comm->useRemoteRecv ) { AllGatherKernel<NUM_THREADS, UNROLL_COUNT, true, T> <<<1, NUM_THREADS + 1, 0, stream>>>(args); } else { AllGatherKernel<NUM_THREADS, UNROLL_COUNT, false, T> <<<1, NUM_THREADS + 1, 0, stream>>>(args); } return ncclSuccess; } class AllGatherFunctor { public: ncclResult_t operator()(const void* sendbuff, void* recvbuff, int count, ncclDataType_t datatype, ncclRedOp_t /*dummy operation*/, int /*dummy root*/, ncclComm* comm, cudaStream_t stream) { int numUnroll = 16; // this is optimal on dt07 with 4 GPUs switch (datatype) { case ncclChar: return ncclAllGatherWithType<char>(sendbuff, recvbuff, count, comm, numUnroll, stream); case ncclInt: return ncclAllGatherWithType<int>(sendbuff, recvbuff, count, comm, numUnroll, stream); #if CUDART_VERSION >= 7050 case ncclHalf: return ncclAllGatherWithType<half>(sendbuff, recvbuff, count, comm, numUnroll, stream); #endif case ncclFloat: return ncclAllGatherWithType<float>(sendbuff, recvbuff, count, comm, numUnroll, stream); case ncclDouble: return ncclAllGatherWithType<double>(sendbuff, recvbuff, count, comm, numUnroll, stream); case ncclInt64: return ncclAllGatherWithType<long long>(sendbuff, recvbuff, count, comm, numUnroll, stream); case ncclUint64: return ncclAllGatherWithType<unsigned long long>(sendbuff, recvbuff, count, comm, numUnroll, stream); } return ncclInvalidType; } }; extern "C" DSOGLOBAL ncclResult_t ncclAllGather(const void* sendbuff, int count, ncclDataType_t datatype, void* recvbuff, ncclComm_t comm, cudaStream_t stream) { return enqueue(AllGatherFunctor(), sendbuff, recvbuff, count, datatype, ncclSum, 0, comm, stream); }
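// Hedged host-side sketch (not part of either all_gather file above): it reproduces the
// block-vs-step table from the comment in ncclAllGatherWithType() for the example
// comm->userFromRing = { 1, 2, 0, 3 }, using the same indexing rule as the device
// function GetBlock(). Purely illustrative; nothing here is from the original sources.
#include <stdio.h>

int main(void) {
    const int numGPUs = 4;
    const int userFromRing[4] = { 1, 2, 0, 3 };
    printf("step\\GPU");
    for (int gpu = 0; gpu < numGPUs; ++gpu) printf("%4d", gpu);
    printf("\n");
    for (int step = 0; step < numGPUs - 1; ++step) {
        printf("%8d", step);
        for (int index = 0; index < numGPUs; ++index) {
            // same formula as GetBlock(index, step, userFromRing, numGPUs)
            int block = userFromRing[(numGPUs + index - step) % numGPUs];
            printf("%4d", block);
        }
        printf("\n");
    }
    return 0;
}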
2ee39695717dfc28335339b379c05166e63dd76a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <math.h> #include <float.h> #include <hiprand/hiprand_kernel.h> #include "util.h" /* from wikipedia page, for machine epsilon calculation */ /* assumes mantissa in final bits */ __device__ double machine_eps_dbl() { typedef union { long long i64; double d64; } dbl_64; dbl_64 s; s.d64 = 1.; s.i64++; return (s.d64 - 1.); } __device__ float machine_eps_flt() { typedef union { int i32; float f32; } flt_32; flt_32 s; s.f32 = 1.; s.i32++; return (s.f32 - 1.); } #define EPS 0 #define MIN 1 #define MAX 2 extern "C" __global__ void calc_consts(float *fvals, double *dvals) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i==0) { fvals[EPS] = machine_eps_flt(); dvals[EPS]= machine_eps_dbl(); float xf, oldxf; double xd, oldxd; xf = 2.; oldxf = 1.; xd = 2.; oldxd = 1.; /* double until overflow */ /* Note that real fmax is somewhere between xf and oldxf */ while (!isinf(xf)) { oldxf *= 2.; xf *= 2.; } while (!isinf(xd)) { oldxd *= 2.; xd *= 2.; } dvals[MAX] = oldxd; fvals[MAX] = oldxf; /* half until overflow */ /* Note that real fmin is somewhere between xf and oldxf */ xf = 1.; oldxf = 2.; xd = 1.; oldxd = 2.; while (xf != 0.) { oldxf /= 2.; xf /= 2.; } while (xd != 0.) { oldxd /= 2.; xd /= 2.; } dvals[MIN] = oldxd; fvals[MIN] = oldxf; } return; } __forceinline__ __device__ int next(hiprandState_t *state) { return hiprand(state); } __forceinline__ __device__ float nextGaussian(hiprandState_t *state, float min, float max) { float range = max - min; float result = hiprand_normal(state); return ((result + 5) / 10) * range + min; } __forceinline__ __device__ float nextGaussian(hiprandState_t *state) { return hiprand_normal(state); } __forceinline__ __device__ float nextFloat(hiprandState_t *state) { return hiprand_uniform(state); } extern "C" __global__ void initState(unsigned long long seed, hiprandState_t *state) { int i = threadIdx.x + blockIdx.x * blockDim.x; hiprand_init(seed, 0, 0, &state[i]); } extern "C" __global__ void empty() { } extern "C" __global__ void test_gauss(unsigned long long seed, float* r, hiprandState_t* state) { int i = threadIdx.x + blockIdx.x * blockDim.x; // hiprandState_t state1; // if(threadIdx.x == 0) // printf("%d\n", sizeof(state)); // hiprand_init(seed, i, 0, &state[i]); r[i] = nextGaussian(&state[i], -1, 1); // printf("%5d %f %f\n", i, nextGaussian(&state, -0.5, 0.5), nextGaussian(&state, -1, 1)); // printf("%5d %f %f\n", i, nextGaussian(&state, -0.5, 0.5), nextGaussian(&state, -1, 1)); } extern "C" __global__ void test_gauss2(unsigned long long seed, float* r) { int i = threadIdx.x + blockIdx.x * blockDim.x; hiprandState_t state; // hiprand_init(seed, i, 0, &state); r[i] = nextGaussian(&state); // printf("%5d %f %f\n", i, nextGaussian(&state, -0.5, 0.5), nextGaussian(&state, -1, 1)); // printf("%5d %f %f\n", i, nextGaussian(&state, -0.5, 0.5), nextGaussian(&state, -1, 1)); } extern "C" __global__ void test_float(unsigned long long seed, float* r) { int i = threadIdx.x + blockIdx.x * blockDim.x; hiprandState_t state; // hiprand_init(seed, i, 0, &state); r[i] = nextFloat(&state); // printf("%5d %f %f\n", i, nextGaussian(&state, -0.5, 0.5), nextGaussian(&state, -1, 1)); // printf("%5d %f %f\n", i, nextGaussian(&state, -0.5, 0.5), nextGaussian(&state, -1, 1)); } extern "C" __global__ void slow(float* r, long size) { __shared__ float cache[THREADS_PER_BLOCK]; int base = blockIdx.x * blockDim.x; for(int x=0; x < 777; x++) { int i = 
threadIdx.x % 57; int j = i % 2 == 1 ? -1 : 1; int index = (threadIdx.x + i + j) % THREADS_PER_BLOCK; if(base + index < size) cache[threadIdx.x] = cache[index] + r[base + index]; } if(base + threadIdx.x < size) r[base + threadIdx.x] = base + threadIdx.x; }
2ee39695717dfc28335339b379c05166e63dd76a.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <math.h> #include <float.h> #include <curand_kernel.h> #include "util.h" /* from wikipedia page, for machine epsilon calculation */ /* assumes mantissa in final bits */ __device__ double machine_eps_dbl() { typedef union { long long i64; double d64; } dbl_64; dbl_64 s; s.d64 = 1.; s.i64++; return (s.d64 - 1.); } __device__ float machine_eps_flt() { typedef union { int i32; float f32; } flt_32; flt_32 s; s.f32 = 1.; s.i32++; return (s.f32 - 1.); } #define EPS 0 #define MIN 1 #define MAX 2 extern "C" __global__ void calc_consts(float *fvals, double *dvals) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i==0) { fvals[EPS] = machine_eps_flt(); dvals[EPS]= machine_eps_dbl(); float xf, oldxf; double xd, oldxd; xf = 2.; oldxf = 1.; xd = 2.; oldxd = 1.; /* double until overflow */ /* Note that real fmax is somewhere between xf and oldxf */ while (!isinf(xf)) { oldxf *= 2.; xf *= 2.; } while (!isinf(xd)) { oldxd *= 2.; xd *= 2.; } dvals[MAX] = oldxd; fvals[MAX] = oldxf; /* half until overflow */ /* Note that real fmin is somewhere between xf and oldxf */ xf = 1.; oldxf = 2.; xd = 1.; oldxd = 2.; while (xf != 0.) { oldxf /= 2.; xf /= 2.; } while (xd != 0.) { oldxd /= 2.; xd /= 2.; } dvals[MIN] = oldxd; fvals[MIN] = oldxf; } return; } __forceinline__ __device__ int next(curandState_t *state) { return curand(state); } __forceinline__ __device__ float nextGaussian(curandState_t *state, float min, float max) { float range = max - min; float result = curand_normal(state); return ((result + 5) / 10) * range + min; } __forceinline__ __device__ float nextGaussian(curandState_t *state) { return curand_normal(state); } __forceinline__ __device__ float nextFloat(curandState_t *state) { return curand_uniform(state); } extern "C" __global__ void initState(unsigned long long seed, curandState_t *state) { int i = threadIdx.x + blockIdx.x * blockDim.x; curand_init(seed, 0, 0, &state[i]); } extern "C" __global__ void empty() { } extern "C" __global__ void test_gauss(unsigned long long seed, float* r, curandState_t* state) { int i = threadIdx.x + blockIdx.x * blockDim.x; // curandState_t state1; // if(threadIdx.x == 0) // printf("%d\n", sizeof(state)); // curand_init(seed, i, 0, &state[i]); r[i] = nextGaussian(&state[i], -1, 1); // printf("%5d %f %f\n", i, nextGaussian(&state, -0.5, 0.5), nextGaussian(&state, -1, 1)); // printf("%5d %f %f\n", i, nextGaussian(&state, -0.5, 0.5), nextGaussian(&state, -1, 1)); } extern "C" __global__ void test_gauss2(unsigned long long seed, float* r) { int i = threadIdx.x + blockIdx.x * blockDim.x; curandState_t state; // curand_init(seed, i, 0, &state); r[i] = nextGaussian(&state); // printf("%5d %f %f\n", i, nextGaussian(&state, -0.5, 0.5), nextGaussian(&state, -1, 1)); // printf("%5d %f %f\n", i, nextGaussian(&state, -0.5, 0.5), nextGaussian(&state, -1, 1)); } extern "C" __global__ void test_float(unsigned long long seed, float* r) { int i = threadIdx.x + blockIdx.x * blockDim.x; curandState_t state; // curand_init(seed, i, 0, &state); r[i] = nextFloat(&state); // printf("%5d %f %f\n", i, nextGaussian(&state, -0.5, 0.5), nextGaussian(&state, -1, 1)); // printf("%5d %f %f\n", i, nextGaussian(&state, -0.5, 0.5), nextGaussian(&state, -1, 1)); } extern "C" __global__ void slow(float* r, long size) { __shared__ float cache[THREADS_PER_BLOCK]; int base = blockIdx.x * blockDim.x; for(int x=0; x < 777; x++) { int i = threadIdx.x % 57; int j = i % 2 == 1 ? 
-1 : 1; int index = (threadIdx.x + i + j) % THREADS_PER_BLOCK; if(base + index < size) cache[threadIdx.x] = cache[index] + r[base + index]; } if(base + threadIdx.x < size) r[base + threadIdx.x] = base + threadIdx.x; }
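// Hedged host-only sketch (not part of the original file): the same "take the bit
// pattern of 1.0 and add one ulp" idea used by machine_eps_flt()/machine_eps_dbl()
// above, written with memcpy instead of a union and checked against the <float.h>
// constants. Function names here are illustrative only.
#include <float.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static double host_eps_dbl(void) {
    double d = 1.0;
    uint64_t bits;
    memcpy(&bits, &d, sizeof(bits));   // grab the bit pattern of 1.0
    bits++;                            // next representable double above 1.0
    memcpy(&d, &bits, sizeof(d));
    return d - 1.0;                    // the difference is the machine epsilon
}

static float host_eps_flt(void) {
    float f = 1.0f;
    uint32_t bits;
    memcpy(&bits, &f, sizeof(bits));
    bits++;                            // next representable float above 1.0f
    memcpy(&f, &bits, sizeof(f));
    return f - 1.0f;
}

int main(void) {
    printf("float  eps %g (FLT_EPSILON %g)\n", host_eps_flt(), (double)FLT_EPSILON);
    printf("double eps %g (DBL_EPSILON %g)\n", host_eps_dbl(), DBL_EPSILON);
    return 0;
}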
c4885309f43b0cb7407653f278db5ea7a9fcc1a9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by root on 23/03/2020.
//

#include "../Matrix.cuh"
#include <iostream>

__global__ void matrixExp(double *a, double *c, int cr, int cc){
    int x = blockIdx.x * blockDim.x + threadIdx.x; // col
    int y = blockIdx.y * blockDim.y + threadIdx.y; // row
    if(x < cc && y < cr){
        c[y * cc + x] = exp(a[y * cc + x]);
    }
}

Matrix Matrix::exp(){
    static double* c;
    c = (double*) calloc(this->Rows*this->Columns,sizeof(double));

    // Define the device memory pointers
    double *d_a, *d_c;

    // Define the size of each matrix in memory
    long aSize = this->Rows*this->Columns*sizeof(double);
    long cSize = this->Rows*this->Columns*sizeof(double);

    // Allocate space in device memory
    hipMalloc((void**)&d_a, aSize);
    hipMalloc((void**)&d_c, cSize);

    // Move the matrix to the allocated device memory
    hipMemcpy(d_a, this->Value, aSize, hipMemcpyHostToDevice);

    // Define the launch dimensions
    dim3 dimBlock(32,32); // 32x32 -> 1024 Threads
    dim3 dimGrid(this->Rows,this->Columns);

    // Launch the element-wise exponential kernel
    hipLaunchKernelGGL(( matrixExp), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_c, this->Rows, this->Columns);

    // Copy the result back
    hipMemcpy(c, d_c, cSize, hipMemcpyDeviceToHost);

    // Free the device memory
    hipFree(d_a);
    hipFree(d_c);

    // Return the result
    return {this->Columns, this->Rows, c};
}
c4885309f43b0cb7407653f278db5ea7a9fcc1a9.cu
//
// Created by root on 23/03/2020.
//

#include "../Matrix.cuh"
#include <iostream>

__global__ void matrixExp(double *a, double *c, int cr, int cc){
    int x = blockIdx.x * blockDim.x + threadIdx.x; // col
    int y = blockIdx.y * blockDim.y + threadIdx.y; // row
    if(x < cc && y < cr){
        c[y * cc + x] = exp(a[y * cc + x]);
    }
}

Matrix Matrix::exp(){
    static double* c;
    c = (double*) calloc(this->Rows*this->Columns,sizeof(double));

    // Define the device memory pointers
    double *d_a, *d_c;

    // Define the size of each matrix in memory
    long aSize = this->Rows*this->Columns*sizeof(double);
    long cSize = this->Rows*this->Columns*sizeof(double);

    // Allocate space in device memory
    cudaMalloc((void**)&d_a, aSize);
    cudaMalloc((void**)&d_c, cSize);

    // Move the matrix to the allocated device memory
    cudaMemcpy(d_a, this->Value, aSize, cudaMemcpyHostToDevice);

    // Define the launch dimensions
    dim3 dimBlock(32,32); // 32x32 -> 1024 Threads
    dim3 dimGrid(this->Rows,this->Columns);

    // Launch the element-wise exponential kernel
    matrixExp<<<dimGrid, dimBlock>>>(d_a, d_c, this->Rows, this->Columns);

    // Copy the result back
    cudaMemcpy(c, d_c, cSize, cudaMemcpyDeviceToHost);

    // Free the device memory
    cudaFree(d_a);
    cudaFree(d_c);

    // Return the result
    return {this->Columns, this->Rows, c};
}
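// Hedged sketch (not part of the original file): the launch above uses dimGrid(Rows, Columns),
// i.e. one 32x32 block per matrix element, which for typical shapes launches far more threads
// than elements and relies on the bounds check inside matrixExp. A conventional alternative,
// shown below under that assumption, ceil-divides the matrix extent by the block extent.
// The helper name makeExpLaunchDims is illustrative only.
#include <cuda_runtime.h>

struct LaunchDims { dim3 grid; dim3 block; };

static LaunchDims makeExpLaunchDims(int rows, int cols) {
    dim3 block(32, 32);                            // 32x32 -> 1024 threads, as in the file
    dim3 grid((cols + block.x - 1) / block.x,      // x indexes columns in matrixExp
              (rows + block.y - 1) / block.y);     // y indexes rows
    return { grid, block };
}

// Possible usage (d_a and d_c assumed to be device buffers of rows*cols doubles):
//   LaunchDims d = makeExpLaunchDims(rows, cols);
//   matrixExp<<<d.grid, d.block>>>(d_a, d_c, rows, cols);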
8277e93635f506e2a2a044352378e0932a9ea49f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" # include <bits/stdc++.h> # include <hip/hip_runtime.h> #define SIZE 60// Global Size #define BLOCK_SIZE 1024 using namespace std; //::::::::::::::::::::::::::::::::::::::::::GPU:::::::::::::::::::::::::::::::: // :::: Kernel __global__ void kernel_prefix_sum_inefficient(double *g_idata,double *g_odata,int l){ // Sequential Addressing technique __shared__ double sdata[BLOCK_SIZE]; // each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<l && tid !=0){ sdata[tid] = g_idata[i-1]; }else{ sdata[tid] = 0; } // do reduction in shared mem for(unsigned int s=1;s<=tid;s *=2){ __syncthreads(); sdata[tid]+=sdata[tid-s]; } // write result for this block to global mem g_odata[i] = sdata[tid]; } __global__ void kernel_prefix_sum_efficient(double *g_idata,double *g_odata,int l){ // Sequential Addressing technique } // :::: Calls void d_VectorMult(double *Vec1,double *Total){ double * d_Vec1; double * d_Total; double Blocksize=BLOCK_SIZE; // Block of 1Dim hipMalloc((void**)&d_Vec1,SIZE*sizeof(double)); hipMalloc((void**)&d_Total,SIZE*sizeof(double)); hipMemcpy(d_Vec1,Vec1,SIZE*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(d_Total,Total,SIZE*sizeof(double),hipMemcpyHostToDevice); dim3 dimBlock(Blocksize,1,1); dim3 dimGrid(ceil(SIZE/Blocksize),1,1); hipLaunchKernelGGL(( kernel_prefix_sum_inefficient), dim3(dimGrid),dim3(dimBlock), 0, 0, d_Vec1,d_Total,SIZE); /* int temp=SIZE; while(temp>1){ dim3 dimBlock(Blocksize,1,1); int grid=ceil(temp/Blocksize); dim3 dimGrid(grid,1,1); hipLaunchKernelGGL(( kernel_prefix_sum_inefficient), dim3(dimGrid),dim3(dimBlock), 0, 0, d_Vec1,d_Total,SIZE); hipDeviceSynchronize(); hipMemcpy(d_Vec1,d_Total,SIZE*sizeof(double),hipMemcpyDeviceToDevice); temp=ceil(temp/Blocksize); } */ hipMemcpy(Total,d_Total,SIZE*sizeof(double),hipMemcpyDeviceToHost); hipFree(d_Vec1); hipFree(d_Total); } //::::::::::::::::::::::::::::::::::::::::::CPU:::::::::::::::::::::::::::::::: void h_prefix_sum(double *Vec1, double *all){ all[0]=Vec1[0]; for(int i=0;i<SIZE;i++) all[i]=all[i-1]+Vec1[i]; } //:::::::::::::::::::::::::::: Rutinary Functions void Fill_vec(double *Vec,double Value){ for(int i =0 ; i<SIZE ; i++) Vec[i]=Value; } void Show_vec(double *Vec){ for (int i=0;i<SIZE;i++){ if(i%10==0 && i!=0){ cout<<endl; } cout<<"["<<Vec[i]<<"] "; } cout<<endl; } int Checksum(double *Answer1 , double *Answer2){ for(unsigned int i=0;i<SIZE;i++){ if(fabs(Answer1[i]-Answer2[i]) > 0.1){ cout<<"BAD Work Guy"<<endl; return 0; } } cout<<"GOOD Work Guy :D"<<endl; return 0; } // :::::::::::::::::::::::::::::::::::Clock Function:::::::::::::::::::::::::::: double diffclock(clock_t clock1,clock_t clock2){ double diffticks=clock2-clock1; double diffms=(diffticks)/(CLOCKS_PER_SEC/1); // /1000 mili return diffms; } // :::::::::::::::::::::::::::::::::::::::Main::::::::::::::::::::::::::::::::. int main(){ double T1,T2; // Time flags double *Vec1 = (double*)malloc((SIZE)*sizeof(double)); // Elements to compute. CPU way double *Total2 = (double*)malloc((SIZE)*sizeof(double)); // GPU double *Total1 = (double*)malloc(sizeof(double)*(SIZE)); // Total Variables. 
// Fill the container vectors with data Fill_vec(Vec1,1.0); Fill_vec(Total2,0.0); // Register time to finish the algorithm // Sequential clock_t start = clock(); h_prefix_sum(Vec1,Total1); clock_t end = clock(); T1=diffclock(start,end); //Show_vec(Total1); //cout<<"Serial Result: "<<*Total1<<" At "<<T1<<",Seconds"<<endl; // Parallel // first case: Total2[0]=Vec1[0]; d_VectorMult(Vec1,Total2); //Show_vec(Total2); Checksum(Total1,Total2); // releasing Memory free(Vec1); free(Total1); free(Total2); return 0; }
8277e93635f506e2a2a044352378e0932a9ea49f.cu
# include <bits/stdc++.h> # include <cuda.h> #define SIZE 60// Global Size #define BLOCK_SIZE 1024 using namespace std; //::::::::::::::::::::::::::::::::::::::::::GPU:::::::::::::::::::::::::::::::: // :::: Kernel __global__ void kernel_prefix_sum_inefficient(double *g_idata,double *g_odata,int l){ // Sequential Addressing technique __shared__ double sdata[BLOCK_SIZE]; // each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<l && tid !=0){ sdata[tid] = g_idata[i-1]; }else{ sdata[tid] = 0; } // do reduction in shared mem for(unsigned int s=1;s<=tid;s *=2){ __syncthreads(); sdata[tid]+=sdata[tid-s]; } // write result for this block to global mem g_odata[i] = sdata[tid]; } __global__ void kernel_prefix_sum_efficient(double *g_idata,double *g_odata,int l){ // Sequential Addressing technique } // :::: Calls void d_VectorMult(double *Vec1,double *Total){ double * d_Vec1; double * d_Total; double Blocksize=BLOCK_SIZE; // Block of 1Dim cudaMalloc((void**)&d_Vec1,SIZE*sizeof(double)); cudaMalloc((void**)&d_Total,SIZE*sizeof(double)); cudaMemcpy(d_Vec1,Vec1,SIZE*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(d_Total,Total,SIZE*sizeof(double),cudaMemcpyHostToDevice); dim3 dimBlock(Blocksize,1,1); dim3 dimGrid(ceil(SIZE/Blocksize),1,1); kernel_prefix_sum_inefficient<<<dimGrid,dimBlock>>>(d_Vec1,d_Total,SIZE); /* int temp=SIZE; while(temp>1){ dim3 dimBlock(Blocksize,1,1); int grid=ceil(temp/Blocksize); dim3 dimGrid(grid,1,1); kernel_prefix_sum_inefficient<<<dimGrid,dimBlock>>>(d_Vec1,d_Total,SIZE); cudaDeviceSynchronize(); cudaMemcpy(d_Vec1,d_Total,SIZE*sizeof(double),cudaMemcpyDeviceToDevice); temp=ceil(temp/Blocksize); } */ cudaMemcpy(Total,d_Total,SIZE*sizeof(double),cudaMemcpyDeviceToHost); cudaFree(d_Vec1); cudaFree(d_Total); } //::::::::::::::::::::::::::::::::::::::::::CPU:::::::::::::::::::::::::::::::: void h_prefix_sum(double *Vec1, double *all){ all[0]=Vec1[0]; for(int i=0;i<SIZE;i++) all[i]=all[i-1]+Vec1[i]; } //:::::::::::::::::::::::::::: Rutinary Functions void Fill_vec(double *Vec,double Value){ for(int i =0 ; i<SIZE ; i++) Vec[i]=Value; } void Show_vec(double *Vec){ for (int i=0;i<SIZE;i++){ if(i%10==0 && i!=0){ cout<<endl; } cout<<"["<<Vec[i]<<"] "; } cout<<endl; } int Checksum(double *Answer1 , double *Answer2){ for(unsigned int i=0;i<SIZE;i++){ if(fabs(Answer1[i]-Answer2[i]) > 0.1){ cout<<"BAD Work Guy"<<endl; return 0; } } cout<<"GOOD Work Guy :D"<<endl; return 0; } // :::::::::::::::::::::::::::::::::::Clock Function:::::::::::::::::::::::::::: double diffclock(clock_t clock1,clock_t clock2){ double diffticks=clock2-clock1; double diffms=(diffticks)/(CLOCKS_PER_SEC/1); // /1000 mili return diffms; } // :::::::::::::::::::::::::::::::::::::::Main::::::::::::::::::::::::::::::::. int main(){ double T1,T2; // Time flags double *Vec1 = (double*)malloc((SIZE)*sizeof(double)); // Elements to compute. CPU way double *Total2 = (double*)malloc((SIZE)*sizeof(double)); // GPU double *Total1 = (double*)malloc(sizeof(double)*(SIZE)); // Total Variables. 
// Fill the container vectors with data Fill_vec(Vec1,1.0); Fill_vec(Total2,0.0); // Register time to finish the algorithm // Sequential clock_t start = clock(); h_prefix_sum(Vec1,Total1); clock_t end = clock(); T1=diffclock(start,end); //Show_vec(Total1); //cout<<"Serial Result: "<<*Total1<<" At "<<T1<<",Seconds"<<endl; // Parallel // first case: Total2[0]=Vec1[0]; d_VectorMult(Vec1,Total2); //Show_vec(Total2); Checksum(Total1,Total2); // releasing Memory free(Vec1); free(Total1); free(Total2); return 0; }
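// Hedged sketch (not part of the original file): a sequential inclusive prefix sum for
// host-side cross-checking, matching what h_prefix_sum() above appears to intend. Note
// that h_prefix_sum() starts its loop at i = 0 and therefore reads all[-1] on the first
// iteration; the reference below starts at i = 1 instead. The name h_prefix_sum_ref is
// illustrative only.
void h_prefix_sum_ref(const double *in, double *out, int n) {
    if (n <= 0) return;
    out[0] = in[0];                   // first prefix is just the first element
    for (int i = 1; i < n; i++)       // each prefix extends the previous one
        out[i] = out[i - 1] + in[i];
}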
a1f55acd3812b1183294dcc1f612d95eba5d0333.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include <bits/stdc++.h> using namespace std; #define debug 0 __global__ void useless(){} __global__ void init(int n, int *arr, int val){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < n){ arr[index] = val; } } __global__ void set_val(int *arr, int index, int val){ arr[index] = val; } __global__ void BFS_step(int *frontier_in, int *frontier_out, int *off, int *adj, int *aux, int *parents){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; int cnt = frontier_in[0]; if(index < cnt){ int c = 0; int s = frontier_in[index+1]; for(int i=0; i<(off[s+1]-off[s]); i++){ int d = adj[off[s] + i]; if(atomicCAS(&parents[d], -1, s) == -1){ aux[off[s] + c] = d; c++; } } int start = atomicAdd(&frontier_out[0], c); for(int i=0; i<c; i++){ frontier_out[start + i + 1] = aux[off[s] + i]; } } } void HyperBFS(int nv, int mv, int nh, int mh, int source, string outfile, int *offv, int *offh, int *adjv, int *adjh){ int *parentsv; int *parentsh; hipMalloc(&parentsv, nv * sizeof(int)); hipMalloc(&parentsh, nh * sizeof(int)); hipLaunchKernelGGL(( init), dim3((nv+31)/32), dim3(32), 0, 0, nv, parentsv, -1); hipLaunchKernelGGL(( init), dim3((nh+31)/32), dim3(32), 0, 0, nh, parentsh, -1); int *auxv; int *auxh; hipMalloc(&auxv, mv * sizeof(int)); hipMalloc(&auxh, mh * sizeof(int)); int *frontierv; int *frontierh; hipMalloc(&frontierv, (nv + 1) * sizeof(int)); hipMalloc(&frontierh, (nh + 1) * sizeof(int)); int *check = (int *) malloc(sizeof(int)); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierh, 0, 0); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierv, 0, 1); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierv, 1, source); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, parentsv, source, source); while(1){ // HyperBFS main loop hipMemcpy(check, frontierv, sizeof(int), hipMemcpyDeviceToHost); if(*check == 0) break; hipLaunchKernelGGL(( BFS_step), dim3((*check+31)/32), dim3(32), 0, 0, frontierv, frontierh, offv, adjv, auxv, parentsh); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierv, 0, 0); if(debug){ int *frontier = (int *) malloc((nh + 1) * sizeof(int)); hipMemcpy(frontier, frontierh, (nh + 1) * sizeof(int), hipMemcpyDeviceToHost); cout << "frontierh "; for(int i=0; i<=nh; i++){ cout << frontier[i] << " "; } cout << endl; int *parents = (int *) malloc(nh * sizeof(int)); hipMemcpy(parents, parentsh, nh * sizeof(int), hipMemcpyDeviceToHost); cout << "parentsh "; for(int i=0; i<nh; i++){ cout << parents[i] << " "; } cout << endl; } hipMemcpy(check, frontierh, sizeof(int), hipMemcpyDeviceToHost); if(*check == 0) break; hipLaunchKernelGGL(( BFS_step), dim3((*check+31)/32), dim3(32), 0, 0, frontierh, frontierv, offh, adjh, auxh, parentsv); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierh, 0, 0); if(debug){ int *frontier = (int *) malloc((nv + 1) * sizeof(int)); hipMemcpy(frontier, frontierv, (nv + 1) * sizeof(int), hipMemcpyDeviceToHost); cout << "frontierv "; for(int i=0; i<=nv; i++){ cout << frontier[i] << " "; } cout << endl; int *parents = (int *) malloc(nv * sizeof(int)); hipMemcpy(parents, parentsv, nv * sizeof(int), hipMemcpyDeviceToHost); cout << "parentsv "; for(int i=0; i<nv; i++){ cout << parents[i] << " "; } cout << endl; } } hipDeviceSynchronize(); } __global__ void init_neg(int n, int *arr, int *neg){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < n){ 
arr[index] = -neg[index]; } } __global__ void BPath_step(int *frontier_in, int *frontier_out, int *off, int *adj, int *aux, int *parents, int *worklist){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; int cnt = frontier_in[0]; if(index < cnt){ int c = 0; int s = frontier_in[index+1]; for(int i=0; i<(off[s+1]-off[s]); i++){ int d = adj[off[s] + i]; int old = atomicAdd(&parents[d], 1); if(old == -1){ parents[d] = s; } else{ if(atomicCAS(&worklist[d], 0, 1) == 0){ aux[off[s] + c] = d; c++; } } } int start = atomicAdd(&frontier_out[0], c); for(int i=0; i<c; i++){ frontier_out[start + i + 1] = aux[off[s] + i]; } } } void HyperBPath(int nv, int mv, int nh, int mh, int source, string outfile, int *offv, int *offh, int *adjv, int *adjh, int *incntv, int *incnth){ int *parentsv; int *parentsh; int *worklist; hipMalloc(&parentsv, nv * sizeof(int)); hipMalloc(&parentsh, nh * sizeof(int)); hipMalloc(&worklist, nh * sizeof(int)); hipLaunchKernelGGL(( init), dim3((nv+31)/32), dim3(32), 0, 0, nv, parentsv, -1); hipLaunchKernelGGL(( init_neg), dim3((nh+31)/32), dim3(32), 0, 0, nh, parentsh, incnth); hipLaunchKernelGGL(( init), dim3((nh+31)/32), dim3(32), 0, 0, nh, worklist, 0); int *auxv; int *auxh; hipMalloc(&auxv, mv * sizeof(int)); hipMalloc(&auxh, mh * sizeof(int)); int *frontierv; int *frontierh; hipMalloc(&frontierv, (nv + 1) * sizeof(int)); hipMalloc(&frontierh, (nh + 1) * sizeof(int)); int *check = (int *) malloc(sizeof(int)); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierh, 0, 0); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierv, 0, 1); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierv, 1, source); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, parentsv, source, source); while(1){ // HyperBFS main loop hipMemcpy(check, frontierv, sizeof(int), hipMemcpyDeviceToHost); if(*check == 0) break; hipLaunchKernelGGL(( BPath_step), dim3((*check+31)/32), dim3(32), 0, 0, frontierv, frontierh, offv, adjv, auxv, parentsh, worklist); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierv, 0, 0); hipLaunchKernelGGL(( init), dim3((nh+31)/32), dim3(32), 0, 0, nh, worklist, 0); if(debug){ int *frontier = (int *) malloc((nh + 1) * sizeof(int)); hipMemcpy(frontier, frontierh, (nh + 1) * sizeof(int), hipMemcpyDeviceToHost); cout << "frontierh "; for(int i=0; i<=nh; i++){ cout << frontier[i] << " "; } cout << endl; int *parents = (int *) malloc(nh * sizeof(int)); hipMemcpy(parents, parentsh, nh * sizeof(int), hipMemcpyDeviceToHost); cout << "parentsh "; for(int i=0; i<nh; i++){ cout << parents[i] << " "; } cout << endl; } hipMemcpy(check, frontierh, sizeof(int), hipMemcpyDeviceToHost); if(*check == 0) break; hipLaunchKernelGGL(( BFS_step), dim3((*check+31)/32), dim3(32), 0, 0, frontierh, frontierv, offh, adjh, auxh, parentsv); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierh, 0, 0); if(debug){ int *frontier = (int *) malloc((nv + 1) * sizeof(int)); hipMemcpy(frontier, frontierv, (nv + 1) * sizeof(int), hipMemcpyDeviceToHost); cout << "frontierv "; for(int i=0; i<=nv; i++){ cout << frontier[i] << " "; } cout << endl; int *parents = (int *) malloc(nv * sizeof(int)); hipMemcpy(parents, parentsv, nv * sizeof(int), hipMemcpyDeviceToHost); cout << "parentsv "; for(int i=0; i<nv; i++){ cout << parents[i] << " "; } cout << endl; } } hipDeviceSynchronize(); } __global__ void SSSP_step(int *frontier_in, int *frontier_out, int *off, int *adj, int *wgh, int *aux, int *visit, int *shortest_in, int *shortest_out){ int index = (blockIdx.x * blockDim.x) 
+ threadIdx.x; int cnt = frontier_in[0]; if(index < cnt){ int c = 0; int s = frontier_in[index+1]; for(int i=0; i<(off[s+1]-off[s]); i++){ int d = adj[off[s] + i]; int newdist = shortest_in[s] + wgh[off[s] + i]; int old = shortest_out[d]; if(newdist < old){ atomicMin(&shortest_out[d], newdist); if(atomicCAS(&visit[d], 0, 1) == 0){ aux[off[s] + c] = d; c++; } } } int start = atomicAdd(&frontier_out[0], c); for(int i=0; i<c; i++){ frontier_out[start + i + 1] = aux[off[s] + i]; } } } void HyperSSSP(int nv, int mv, int nh, int mh, int source, string outfile, int *offv, int *offh, int *adjv, int *adjh, int *wghv, int *wghh){ int *visitv; int *visith; hipMalloc(&visitv, nv * sizeof(int)); hipMalloc(&visith, nh * sizeof(int)); hipLaunchKernelGGL(( init), dim3((nv+31)/32), dim3(32), 0, 0, nv, visitv, 0); hipLaunchKernelGGL(( init), dim3((nh+31)/32), dim3(32), 0, 0, nh, visith, 0); int *shortestv; int *shortesth; hipMalloc(&shortestv, nv * sizeof(int)); hipMalloc(&shortesth, nh * sizeof(int)); hipLaunchKernelGGL(( init), dim3((nv+31)/32), dim3(32), 0, 0, nv, shortestv, INT_MAX/2); hipLaunchKernelGGL(( init), dim3((nh+31)/32), dim3(32), 0, 0, nh, shortesth, INT_MAX/2); int *auxv; int *auxh; hipMalloc(&auxv, mv * sizeof(int)); hipMalloc(&auxh, mh * sizeof(int)); int *frontierv; int *frontierh; hipMalloc(&frontierv, (nv + 1) * sizeof(int)); hipMalloc(&frontierh, (nh + 1) * sizeof(int)); int *check = (int *) malloc(sizeof(int)); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierh, 0, 0); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierv, 0, 1); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierv, 1, source); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, shortestv, source, 0); int round = 0; while(1){ // HyperSSSP main loop if(round == nv-1){ hipLaunchKernelGGL(( init), dim3((nv+31)/32), dim3(32), 0, 0, nv, shortestv, -INT_MAX/2); break; } hipMemcpy(check, frontierv, sizeof(int), hipMemcpyDeviceToHost); if(*check == 0) break; hipLaunchKernelGGL(( SSSP_step), dim3((*check+31)/32), dim3(32), 0, 0, frontierv, frontierh, offv, adjv, wghv, auxv, visith, shortestv, shortesth); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierv, 0, 0); hipLaunchKernelGGL(( init), dim3((nh+31)/32), dim3(32), 0, 0, nh, visith, 0); if(debug){ int *frontier = (int *) malloc((nh + 1) * sizeof(int)); hipMemcpy(frontier, frontierh, (nh + 1) * sizeof(int), hipMemcpyDeviceToHost); cout << "frontierh "; for(int i=0; i<=nh; i++){ cout << frontier[i] << " "; } cout << endl; int *shortest = (int *) malloc(nh * sizeof(int)); hipMemcpy(shortest, shortesth, nh * sizeof(int), hipMemcpyDeviceToHost); cout << "shortesth "; for(int i=0; i<nh; i++){ cout << shortest[i] << " "; } cout << endl; } hipMemcpy(check, frontierh, sizeof(int), hipMemcpyDeviceToHost); if(*check == 0) break; hipLaunchKernelGGL(( SSSP_step), dim3((*check+31)/32), dim3(32), 0, 0, frontierh, frontierv, offh, adjh, wghh, auxh, visitv, shortesth, shortestv); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierh, 0, 0); hipLaunchKernelGGL(( init), dim3((nv+31)/32), dim3(32), 0, 0, nv, visitv, 0); round++; if(debug){ int *frontier = (int *) malloc((nv + 1) * sizeof(int)); hipMemcpy(frontier, frontierv, (nv + 1) * sizeof(int), hipMemcpyDeviceToHost); cout << "frontierv "; for(int i=0; i<=nv; i++){ cout << frontier[i] << " "; } cout << endl; int *shortest = (int *) malloc(nv * sizeof(int)); hipMemcpy(shortest, shortestv, nv * sizeof(int), hipMemcpyDeviceToHost); cout << "shortestv "; for(int i=0; i<nv; i++){ cout << 
shortest[i] << " "; } cout << endl; } } hipDeviceSynchronize(); ofstream fout; fout.open(outfile); int *shortest; shortest = (int *) malloc(nv * sizeof(int)); hipMemcpy(shortest, shortestv, nv * sizeof(int), hipMemcpyDeviceToHost); for(int i=0; i<nv; i++){ fout << shortest[i] << " "; } fout << endl; shortest = (int *) malloc(nh * sizeof(int)); hipMemcpy(shortest, shortesth, nh * sizeof(int), hipMemcpyDeviceToHost); for(int i=0; i<nh; i++){ fout << shortest[i] << " "; } fout << endl; fout.close(); } __global__ void init_index(int n, int *arr){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < n){ arr[index+1] = index; } } __global__ void init_float(int n, float *arr, float val){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < n){ arr[index] = val; } } __global__ void PageRank_step(int *frontier_in, int *off, int *adj, float *pval_in, float *pval_out){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; int cnt = frontier_in[0]; if(index < cnt){ int s = frontier_in[index+1]; float add_val = pval_in[s] / (off[s+1] - off[s]); for(int i=0; i<(off[s+1]-off[s]); i++){ int d = adj[off[s] + i]; atomicAdd(&pval_out[d], add_val); } } } __global__ void PageRank_norm(int n, float *pval, float damp, float addconst){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < n){ pval[index] = damp * pval[index] + addconst; } } void HyperPageRank(int nv, int mv, int nh, int mh, int source, string outfile, int *offv, int *offh, int *adjv, int *adjh, int maxiter){ float *pvalv; float *pvalh; hipMalloc(&pvalv, nv * sizeof(float)); hipMalloc(&pvalh, nh * sizeof(float)); hipLaunchKernelGGL(( init_float), dim3((nv+31)/32), dim3(32), 0, 0, nv, pvalv, 1.0/((float)nv)); int *frontierv; int *frontierh; hipMalloc(&frontierv, (nv + 1) * sizeof(int)); hipMalloc(&frontierh, (nh + 1) * sizeof(int)); hipLaunchKernelGGL(( init_index), dim3((nv+31)/32), dim3(32), 0, 0, nv, frontierv); hipLaunchKernelGGL(( init_index), dim3((nh+31)/32), dim3(32), 0, 0, nh, frontierh); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierh, 0, nh); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, frontierv, 0, nv); float damp = 0.85; float addconstv = (1.0 - damp)*(1/(float) nv); float addconsth = (1.0 - damp)*(1/(float) nh); for(int iter = 0; iter < maxiter; iter++){ // HyperPageRank main loop hipLaunchKernelGGL(( init_float), dim3((nh+31)/32), dim3(32), 0, 0, nh, pvalh, 0.0); hipLaunchKernelGGL(( PageRank_step), dim3((nv+31)/32), dim3(32), 0, 0, frontierv, offv, adjv, pvalv, pvalh); if(debug){ float *pval = (float *) malloc(nh * sizeof(float)); hipMemcpy(pval, pvalh, nh * sizeof(float), hipMemcpyDeviceToHost); cout << "pvalh "; for(int i=0; i<nh; i++){ printf("%.6f ", pval[i]); } cout << endl; } hipLaunchKernelGGL(( init_float), dim3((nv+31)/32), dim3(32), 0, 0, nv, pvalv, 0.0); hipLaunchKernelGGL(( PageRank_step), dim3((nh+31)/32), dim3(32), 0, 0, frontierh, offh, adjh, pvalh, pvalv); hipLaunchKernelGGL(( PageRank_norm), dim3((nv+31)/32), dim3(32), 0, 0, nv, pvalv, damp, addconstv); if(debug){ float *pval = (float *) malloc(nv * sizeof(float)); hipMemcpy(pval, pvalv, nv * sizeof(float), hipMemcpyDeviceToHost); cout << "pvalv "; for(int i=0; i<nv; i++){ printf("%.6f ", pval[i]); } cout << endl; } } hipDeviceSynchronize(); ofstream fout; fout.open(outfile); float *pval = (float *) malloc(nv * sizeof(float)); hipMemcpy(pval, pvalv, nv * sizeof(float), hipMemcpyDeviceToHost); for(int i=0; i<nv; i++){ fout << setprecision(6) << pval[i] << " "; } fout << endl; fout.close(); } // main code int 
main(int argc, char **argv){ string algorithm(argv[1]); string infile(argv[2]); string outfile(argv[3]); ifstream fin; fin.open(infile); // read hypergraph parameters string no_use; fin >> no_use; int nv, mv, nh, mh; fin >> nv; fin >> mv; fin >> nh; fin >> mh; int *offv = (int *) malloc((nv + 1) * sizeof(int)); int *offh = (int *) malloc((nh + 1) * sizeof(int)); int *adjv = (int *) malloc(mv * sizeof(int)); int *adjh = (int *) malloc(mh * sizeof(int)); int *wghv = (int *) malloc(mv * sizeof(int)); int *wghh = (int *) malloc(mh * sizeof(int)); int *incntv = (int *) malloc(nv * sizeof(int)); int *incnth = (int *) malloc(nh * sizeof(int)); // read vertex offsets for(int i=0; i<nv; i++){ fin >> offv[i]; } offv[nv] = mv; // read vertex adjacency lists for(int i=0; i<mv; i++){ fin >> adjv[i]; incnth[adjv[i]]++; } // read vertex weights list for(int i=0; i<mv; i++){ fin >> wghv[i]; } // read hyperedge offsets for(int i=0; i<nh; i++){ fin >> offh[i]; } offh[nh] = mh; // read hyperedge adjacency lists for(int i=0; i<mh; i++){ fin >> adjh[i]; incntv[adjh[i]]++; } // read hyperedge weights list for(int i=0; i<mh; i++){ fin >> wghh[i]; } fin.close(); // copy all arrays to GPU int *gpu_offv; int *gpu_offh; int *gpu_adjv; int *gpu_adjh; int *gpu_wghv; int *gpu_wghh; int *gpu_incntv; int *gpu_incnth; hipMalloc(&gpu_offv, (nv + 1) * sizeof(int)); hipMalloc(&gpu_offh, (nh + 1) * sizeof(int)); hipMalloc(&gpu_adjv, mv * sizeof(int)); hipMalloc(&gpu_adjh, mh * sizeof(int)); hipMalloc(&gpu_wghv, mv * sizeof(int)); hipMalloc(&gpu_wghh, mh * sizeof(int)); hipMalloc(&gpu_incntv, nv * sizeof(int)); hipMalloc(&gpu_incnth, nh * sizeof(int)); hipMemcpy(gpu_offv, offv, (nv + 1) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(gpu_offh, offh, (nh + 1) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(gpu_adjv, adjv, mv * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(gpu_adjh, adjh, mh * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(gpu_wghv, wghv, mv * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(gpu_wghh, wghh, mh * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(gpu_incntv, incntv, nv * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(gpu_incnth, incnth, nh * sizeof(int), hipMemcpyHostToDevice); // timing variables hipEvent_t start, stop; float milliseconds; // to avoid first extra time hipLaunchKernelGGL(( useless), dim3(1),dim3(1), 0, 0, ); hipDeviceSynchronize(); if(algorithm == "BFS"){ for(int i=0; i<4; i++){ milliseconds = 0; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); // Call BFS on HyperGraph HyperBFS(nv, mv, nh, mh, 0, outfile, gpu_offv, gpu_offh, gpu_adjv, gpu_adjh); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); printf("Time taken by HyperBFS function to execute is: %.6f ms\n", milliseconds); } } if(algorithm == "BPath"){ for(int i=0; i<4; i++){ milliseconds = 0; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); // Call BPath on HyperGraph HyperBPath(nv, mv, nh, mh, 0, outfile, gpu_offv, gpu_offh, gpu_adjv, gpu_adjh, gpu_incntv, gpu_incnth); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); printf("Time taken by HyperBPath function to execute is: %.6f ms\n", milliseconds); } } if(algorithm == "SSSP"){ for(int i=0; i<4; i++){ milliseconds = 0; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); // Call SSSP on HyperGraph HyperSSSP(nv, mv, nh, mh, 0, outfile, gpu_offv, gpu_offh, gpu_adjv, gpu_adjh, gpu_wghv, gpu_wghh); 
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); printf("Time taken by HyperSSSP function to execute is: %.6f ms\n", milliseconds); } } if(algorithm == "PageRank"){ for(int i=0; i<4; i++){ milliseconds = 0; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); // Call BFS on HyperGraph HyperPageRank(nv, mv, nh, mh, 0, outfile, gpu_offv, gpu_offh, gpu_adjv, gpu_adjh, 1); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); printf("Time taken by HyperPageRank function to execute is: %.6f ms\n", milliseconds); } } return 0; }
a1f55acd3812b1183294dcc1f612d95eba5d0333.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include <bits/stdc++.h> using namespace std; #define debug 0 __global__ void useless(){} __global__ void init(int n, int *arr, int val){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < n){ arr[index] = val; } } __global__ void set_val(int *arr, int index, int val){ arr[index] = val; } __global__ void BFS_step(int *frontier_in, int *frontier_out, int *off, int *adj, int *aux, int *parents){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; int cnt = frontier_in[0]; if(index < cnt){ int c = 0; int s = frontier_in[index+1]; for(int i=0; i<(off[s+1]-off[s]); i++){ int d = adj[off[s] + i]; if(atomicCAS(&parents[d], -1, s) == -1){ aux[off[s] + c] = d; c++; } } int start = atomicAdd(&frontier_out[0], c); for(int i=0; i<c; i++){ frontier_out[start + i + 1] = aux[off[s] + i]; } } } void HyperBFS(int nv, int mv, int nh, int mh, int source, string outfile, int *offv, int *offh, int *adjv, int *adjh){ int *parentsv; int *parentsh; cudaMalloc(&parentsv, nv * sizeof(int)); cudaMalloc(&parentsh, nh * sizeof(int)); init<<<(nv+31)/32, 32>>>(nv, parentsv, -1); init<<<(nh+31)/32, 32>>>(nh, parentsh, -1); int *auxv; int *auxh; cudaMalloc(&auxv, mv * sizeof(int)); cudaMalloc(&auxh, mh * sizeof(int)); int *frontierv; int *frontierh; cudaMalloc(&frontierv, (nv + 1) * sizeof(int)); cudaMalloc(&frontierh, (nh + 1) * sizeof(int)); int *check = (int *) malloc(sizeof(int)); set_val<<<1,1>>>(frontierh, 0, 0); set_val<<<1,1>>>(frontierv, 0, 1); set_val<<<1,1>>>(frontierv, 1, source); set_val<<<1,1>>>(parentsv, source, source); while(1){ // HyperBFS main loop cudaMemcpy(check, frontierv, sizeof(int), cudaMemcpyDeviceToHost); if(*check == 0) break; BFS_step<<<(*check+31)/32, 32>>>(frontierv, frontierh, offv, adjv, auxv, parentsh); set_val<<<1,1>>>(frontierv, 0, 0); if(debug){ int *frontier = (int *) malloc((nh + 1) * sizeof(int)); cudaMemcpy(frontier, frontierh, (nh + 1) * sizeof(int), cudaMemcpyDeviceToHost); cout << "frontierh "; for(int i=0; i<=nh; i++){ cout << frontier[i] << " "; } cout << endl; int *parents = (int *) malloc(nh * sizeof(int)); cudaMemcpy(parents, parentsh, nh * sizeof(int), cudaMemcpyDeviceToHost); cout << "parentsh "; for(int i=0; i<nh; i++){ cout << parents[i] << " "; } cout << endl; } cudaMemcpy(check, frontierh, sizeof(int), cudaMemcpyDeviceToHost); if(*check == 0) break; BFS_step<<<(*check+31)/32, 32>>>(frontierh, frontierv, offh, adjh, auxh, parentsv); set_val<<<1,1>>>(frontierh, 0, 0); if(debug){ int *frontier = (int *) malloc((nv + 1) * sizeof(int)); cudaMemcpy(frontier, frontierv, (nv + 1) * sizeof(int), cudaMemcpyDeviceToHost); cout << "frontierv "; for(int i=0; i<=nv; i++){ cout << frontier[i] << " "; } cout << endl; int *parents = (int *) malloc(nv * sizeof(int)); cudaMemcpy(parents, parentsv, nv * sizeof(int), cudaMemcpyDeviceToHost); cout << "parentsv "; for(int i=0; i<nv; i++){ cout << parents[i] << " "; } cout << endl; } } cudaDeviceSynchronize(); } __global__ void init_neg(int n, int *arr, int *neg){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < n){ arr[index] = -neg[index]; } } __global__ void BPath_step(int *frontier_in, int *frontier_out, int *off, int *adj, int *aux, int *parents, int *worklist){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; int cnt = frontier_in[0]; if(index < cnt){ int c = 0; int s = frontier_in[index+1]; for(int i=0; i<(off[s+1]-off[s]); i++){ int d = adj[off[s] + i]; int old = atomicAdd(&parents[d], 1); if(old == -1){ parents[d] = s; } 
else{ if(atomicCAS(&worklist[d], 0, 1) == 0){ aux[off[s] + c] = d; c++; } } } int start = atomicAdd(&frontier_out[0], c); for(int i=0; i<c; i++){ frontier_out[start + i + 1] = aux[off[s] + i]; } } } void HyperBPath(int nv, int mv, int nh, int mh, int source, string outfile, int *offv, int *offh, int *adjv, int *adjh, int *incntv, int *incnth){ int *parentsv; int *parentsh; int *worklist; cudaMalloc(&parentsv, nv * sizeof(int)); cudaMalloc(&parentsh, nh * sizeof(int)); cudaMalloc(&worklist, nh * sizeof(int)); init<<<(nv+31)/32, 32>>>(nv, parentsv, -1); init_neg<<<(nh+31)/32, 32>>>(nh, parentsh, incnth); init<<<(nh+31)/32, 32>>>(nh, worklist, 0); int *auxv; int *auxh; cudaMalloc(&auxv, mv * sizeof(int)); cudaMalloc(&auxh, mh * sizeof(int)); int *frontierv; int *frontierh; cudaMalloc(&frontierv, (nv + 1) * sizeof(int)); cudaMalloc(&frontierh, (nh + 1) * sizeof(int)); int *check = (int *) malloc(sizeof(int)); set_val<<<1,1>>>(frontierh, 0, 0); set_val<<<1,1>>>(frontierv, 0, 1); set_val<<<1,1>>>(frontierv, 1, source); set_val<<<1,1>>>(parentsv, source, source); while(1){ // HyperBFS main loop cudaMemcpy(check, frontierv, sizeof(int), cudaMemcpyDeviceToHost); if(*check == 0) break; BPath_step<<<(*check+31)/32, 32>>>(frontierv, frontierh, offv, adjv, auxv, parentsh, worklist); set_val<<<1,1>>>(frontierv, 0, 0); init<<<(nh+31)/32, 32>>>(nh, worklist, 0); if(debug){ int *frontier = (int *) malloc((nh + 1) * sizeof(int)); cudaMemcpy(frontier, frontierh, (nh + 1) * sizeof(int), cudaMemcpyDeviceToHost); cout << "frontierh "; for(int i=0; i<=nh; i++){ cout << frontier[i] << " "; } cout << endl; int *parents = (int *) malloc(nh * sizeof(int)); cudaMemcpy(parents, parentsh, nh * sizeof(int), cudaMemcpyDeviceToHost); cout << "parentsh "; for(int i=0; i<nh; i++){ cout << parents[i] << " "; } cout << endl; } cudaMemcpy(check, frontierh, sizeof(int), cudaMemcpyDeviceToHost); if(*check == 0) break; BFS_step<<<(*check+31)/32, 32>>>(frontierh, frontierv, offh, adjh, auxh, parentsv); set_val<<<1,1>>>(frontierh, 0, 0); if(debug){ int *frontier = (int *) malloc((nv + 1) * sizeof(int)); cudaMemcpy(frontier, frontierv, (nv + 1) * sizeof(int), cudaMemcpyDeviceToHost); cout << "frontierv "; for(int i=0; i<=nv; i++){ cout << frontier[i] << " "; } cout << endl; int *parents = (int *) malloc(nv * sizeof(int)); cudaMemcpy(parents, parentsv, nv * sizeof(int), cudaMemcpyDeviceToHost); cout << "parentsv "; for(int i=0; i<nv; i++){ cout << parents[i] << " "; } cout << endl; } } cudaDeviceSynchronize(); } __global__ void SSSP_step(int *frontier_in, int *frontier_out, int *off, int *adj, int *wgh, int *aux, int *visit, int *shortest_in, int *shortest_out){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; int cnt = frontier_in[0]; if(index < cnt){ int c = 0; int s = frontier_in[index+1]; for(int i=0; i<(off[s+1]-off[s]); i++){ int d = adj[off[s] + i]; int newdist = shortest_in[s] + wgh[off[s] + i]; int old = shortest_out[d]; if(newdist < old){ atomicMin(&shortest_out[d], newdist); if(atomicCAS(&visit[d], 0, 1) == 0){ aux[off[s] + c] = d; c++; } } } int start = atomicAdd(&frontier_out[0], c); for(int i=0; i<c; i++){ frontier_out[start + i + 1] = aux[off[s] + i]; } } } void HyperSSSP(int nv, int mv, int nh, int mh, int source, string outfile, int *offv, int *offh, int *adjv, int *adjh, int *wghv, int *wghh){ int *visitv; int *visith; cudaMalloc(&visitv, nv * sizeof(int)); cudaMalloc(&visith, nh * sizeof(int)); init<<<(nv+31)/32, 32>>>(nv, visitv, 0); init<<<(nh+31)/32, 32>>>(nh, visith, 0); int *shortestv; int *shortesth; 
cudaMalloc(&shortestv, nv * sizeof(int)); cudaMalloc(&shortesth, nh * sizeof(int)); init<<<(nv+31)/32, 32>>>(nv, shortestv, INT_MAX/2); init<<<(nh+31)/32, 32>>>(nh, shortesth, INT_MAX/2); int *auxv; int *auxh; cudaMalloc(&auxv, mv * sizeof(int)); cudaMalloc(&auxh, mh * sizeof(int)); int *frontierv; int *frontierh; cudaMalloc(&frontierv, (nv + 1) * sizeof(int)); cudaMalloc(&frontierh, (nh + 1) * sizeof(int)); int *check = (int *) malloc(sizeof(int)); set_val<<<1,1>>>(frontierh, 0, 0); set_val<<<1,1>>>(frontierv, 0, 1); set_val<<<1,1>>>(frontierv, 1, source); set_val<<<1,1>>>(shortestv, source, 0); int round = 0; while(1){ // HyperSSSP main loop if(round == nv-1){ init<<<(nv+31)/32, 32>>>(nv, shortestv, -INT_MAX/2); break; } cudaMemcpy(check, frontierv, sizeof(int), cudaMemcpyDeviceToHost); if(*check == 0) break; SSSP_step<<<(*check+31)/32, 32>>>(frontierv, frontierh, offv, adjv, wghv, auxv, visith, shortestv, shortesth); set_val<<<1,1>>>(frontierv, 0, 0); init<<<(nh+31)/32, 32>>>(nh, visith, 0); if(debug){ int *frontier = (int *) malloc((nh + 1) * sizeof(int)); cudaMemcpy(frontier, frontierh, (nh + 1) * sizeof(int), cudaMemcpyDeviceToHost); cout << "frontierh "; for(int i=0; i<=nh; i++){ cout << frontier[i] << " "; } cout << endl; int *shortest = (int *) malloc(nh * sizeof(int)); cudaMemcpy(shortest, shortesth, nh * sizeof(int), cudaMemcpyDeviceToHost); cout << "shortesth "; for(int i=0; i<nh; i++){ cout << shortest[i] << " "; } cout << endl; } cudaMemcpy(check, frontierh, sizeof(int), cudaMemcpyDeviceToHost); if(*check == 0) break; SSSP_step<<<(*check+31)/32, 32>>>(frontierh, frontierv, offh, adjh, wghh, auxh, visitv, shortesth, shortestv); set_val<<<1,1>>>(frontierh, 0, 0); init<<<(nv+31)/32, 32>>>(nv, visitv, 0); round++; if(debug){ int *frontier = (int *) malloc((nv + 1) * sizeof(int)); cudaMemcpy(frontier, frontierv, (nv + 1) * sizeof(int), cudaMemcpyDeviceToHost); cout << "frontierv "; for(int i=0; i<=nv; i++){ cout << frontier[i] << " "; } cout << endl; int *shortest = (int *) malloc(nv * sizeof(int)); cudaMemcpy(shortest, shortestv, nv * sizeof(int), cudaMemcpyDeviceToHost); cout << "shortestv "; for(int i=0; i<nv; i++){ cout << shortest[i] << " "; } cout << endl; } } cudaDeviceSynchronize(); ofstream fout; fout.open(outfile); int *shortest; shortest = (int *) malloc(nv * sizeof(int)); cudaMemcpy(shortest, shortestv, nv * sizeof(int), cudaMemcpyDeviceToHost); for(int i=0; i<nv; i++){ fout << shortest[i] << " "; } fout << endl; shortest = (int *) malloc(nh * sizeof(int)); cudaMemcpy(shortest, shortesth, nh * sizeof(int), cudaMemcpyDeviceToHost); for(int i=0; i<nh; i++){ fout << shortest[i] << " "; } fout << endl; fout.close(); } __global__ void init_index(int n, int *arr){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < n){ arr[index+1] = index; } } __global__ void init_float(int n, float *arr, float val){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < n){ arr[index] = val; } } __global__ void PageRank_step(int *frontier_in, int *off, int *adj, float *pval_in, float *pval_out){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; int cnt = frontier_in[0]; if(index < cnt){ int s = frontier_in[index+1]; float add_val = pval_in[s] / (off[s+1] - off[s]); for(int i=0; i<(off[s+1]-off[s]); i++){ int d = adj[off[s] + i]; atomicAdd(&pval_out[d], add_val); } } } __global__ void PageRank_norm(int n, float *pval, float damp, float addconst){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < n){ pval[index] = damp * pval[index] + addconst; } } void 
HyperPageRank(int nv, int mv, int nh, int mh, int source, string outfile, int *offv, int *offh, int *adjv, int *adjh, int maxiter){ float *pvalv; float *pvalh; cudaMalloc(&pvalv, nv * sizeof(float)); cudaMalloc(&pvalh, nh * sizeof(float)); init_float<<<(nv+31)/32, 32>>>(nv, pvalv, 1.0/((float)nv)); int *frontierv; int *frontierh; cudaMalloc(&frontierv, (nv + 1) * sizeof(int)); cudaMalloc(&frontierh, (nh + 1) * sizeof(int)); init_index<<<(nv+31)/32, 32>>>(nv, frontierv); init_index<<<(nh+31)/32, 32>>>(nh, frontierh); set_val<<<1,1>>>(frontierh, 0, nh); set_val<<<1,1>>>(frontierv, 0, nv); float damp = 0.85; float addconstv = (1.0 - damp)*(1/(float) nv); float addconsth = (1.0 - damp)*(1/(float) nh); for(int iter = 0; iter < maxiter; iter++){ // HyperPageRank main loop init_float<<<(nh+31)/32, 32>>>(nh, pvalh, 0.0); PageRank_step<<<(nv+31)/32, 32>>>(frontierv, offv, adjv, pvalv, pvalh); if(debug){ float *pval = (float *) malloc(nh * sizeof(float)); cudaMemcpy(pval, pvalh, nh * sizeof(float), cudaMemcpyDeviceToHost); cout << "pvalh "; for(int i=0; i<nh; i++){ printf("%.6f ", pval[i]); } cout << endl; } init_float<<<(nv+31)/32, 32>>>(nv, pvalv, 0.0); PageRank_step<<<(nh+31)/32, 32>>>(frontierh, offh, adjh, pvalh, pvalv); PageRank_norm<<<(nv+31)/32, 32>>>(nv, pvalv, damp, addconstv); if(debug){ float *pval = (float *) malloc(nv * sizeof(float)); cudaMemcpy(pval, pvalv, nv * sizeof(float), cudaMemcpyDeviceToHost); cout << "pvalv "; for(int i=0; i<nv; i++){ printf("%.6f ", pval[i]); } cout << endl; } } cudaDeviceSynchronize(); ofstream fout; fout.open(outfile); float *pval = (float *) malloc(nv * sizeof(float)); cudaMemcpy(pval, pvalv, nv * sizeof(float), cudaMemcpyDeviceToHost); for(int i=0; i<nv; i++){ fout << setprecision(6) << pval[i] << " "; } fout << endl; fout.close(); } // main code int main(int argc, char **argv){ string algorithm(argv[1]); string infile(argv[2]); string outfile(argv[3]); ifstream fin; fin.open(infile); // read hypergraph parameters string no_use; fin >> no_use; int nv, mv, nh, mh; fin >> nv; fin >> mv; fin >> nh; fin >> mh; int *offv = (int *) malloc((nv + 1) * sizeof(int)); int *offh = (int *) malloc((nh + 1) * sizeof(int)); int *adjv = (int *) malloc(mv * sizeof(int)); int *adjh = (int *) malloc(mh * sizeof(int)); int *wghv = (int *) malloc(mv * sizeof(int)); int *wghh = (int *) malloc(mh * sizeof(int)); int *incntv = (int *) malloc(nv * sizeof(int)); int *incnth = (int *) malloc(nh * sizeof(int)); // read vertex offsets for(int i=0; i<nv; i++){ fin >> offv[i]; } offv[nv] = mv; // read vertex adjacency lists for(int i=0; i<mv; i++){ fin >> adjv[i]; incnth[adjv[i]]++; } // read vertex weights list for(int i=0; i<mv; i++){ fin >> wghv[i]; } // read hyperedge offsets for(int i=0; i<nh; i++){ fin >> offh[i]; } offh[nh] = mh; // read hyperedge adjacency lists for(int i=0; i<mh; i++){ fin >> adjh[i]; incntv[adjh[i]]++; } // read hyperedge weights list for(int i=0; i<mh; i++){ fin >> wghh[i]; } fin.close(); // copy all arrays to GPU int *gpu_offv; int *gpu_offh; int *gpu_adjv; int *gpu_adjh; int *gpu_wghv; int *gpu_wghh; int *gpu_incntv; int *gpu_incnth; cudaMalloc(&gpu_offv, (nv + 1) * sizeof(int)); cudaMalloc(&gpu_offh, (nh + 1) * sizeof(int)); cudaMalloc(&gpu_adjv, mv * sizeof(int)); cudaMalloc(&gpu_adjh, mh * sizeof(int)); cudaMalloc(&gpu_wghv, mv * sizeof(int)); cudaMalloc(&gpu_wghh, mh * sizeof(int)); cudaMalloc(&gpu_incntv, nv * sizeof(int)); cudaMalloc(&gpu_incnth, nh * sizeof(int)); cudaMemcpy(gpu_offv, offv, (nv + 1) * sizeof(int), cudaMemcpyHostToDevice); 
cudaMemcpy(gpu_offh, offh, (nh + 1) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(gpu_adjv, adjv, mv * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(gpu_adjh, adjh, mh * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(gpu_wghv, wghv, mv * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(gpu_wghh, wghh, mh * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(gpu_incntv, incntv, nv * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(gpu_incnth, incnth, nh * sizeof(int), cudaMemcpyHostToDevice); // timing variables cudaEvent_t start, stop; float milliseconds; // to avoid first extra time useless<<<1,1>>>(); cudaDeviceSynchronize(); if(algorithm == "BFS"){ for(int i=0; i<4; i++){ milliseconds = 0; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // Call BFS on HyperGraph HyperBFS(nv, mv, nh, mh, 0, outfile, gpu_offv, gpu_offh, gpu_adjv, gpu_adjh); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("Time taken by HyperBFS function to execute is: %.6f ms\n", milliseconds); } } if(algorithm == "BPath"){ for(int i=0; i<4; i++){ milliseconds = 0; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // Call BPath on HyperGraph HyperBPath(nv, mv, nh, mh, 0, outfile, gpu_offv, gpu_offh, gpu_adjv, gpu_adjh, gpu_incntv, gpu_incnth); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("Time taken by HyperBPath function to execute is: %.6f ms\n", milliseconds); } } if(algorithm == "SSSP"){ for(int i=0; i<4; i++){ milliseconds = 0; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // Call SSSP on HyperGraph HyperSSSP(nv, mv, nh, mh, 0, outfile, gpu_offv, gpu_offh, gpu_adjv, gpu_adjh, gpu_wghv, gpu_wghh); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("Time taken by HyperSSSP function to execute is: %.6f ms\n", milliseconds); } } if(algorithm == "PageRank"){ for(int i=0; i<4; i++){ milliseconds = 0; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // Call BFS on HyperGraph HyperPageRank(nv, mv, nh, mh, 0, outfile, gpu_offv, gpu_offh, gpu_adjv, gpu_adjh, 1); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("Time taken by HyperPageRank function to execute is: %.6f ms\n", milliseconds); } } return 0; }
5e6ecfb2d8d845e9433be403b2f1d91b63f566c0.hip
// !!! This is a file automatically generated by hipify!!!
#include "Hornet.hpp"
#include "Core/GPUHornet/BatchUpdate.cuh"
#include "Util/BatchFunctions.hpp"
#include <Host/FileUtil.hpp>        //xlib::extract_filepath_noextension
#include <Device/Util/CudaUtil.cuh> //xlib::deviceInfo
#include <algorithm>                //std::generate
#include <chrono>                   //std::chrono
#include <random>                   //std::mt19937_64
#include <hip/hip_runtime_api.h>

using namespace hornets_nest;
using namespace timer;
using namespace std::string_literals;
using namespace gpu::batch_property;

using HornetGPU = hornets_nest::gpu::Hornet<EMPTY, EMPTY>;

void exec(int argc, char* argv[]);

/**
 * @brief Example tester for Hornet
 */
int main(int argc, char* argv[]) {
    exec(argc, argv);
    hipDeviceReset();
}

void exec(int argc, char* argv[]) {
    using namespace graph::structure_prop;
    using namespace graph::parsing_prop;

    xlib::device_info();
    graph::GraphStd<vid_t, eoff_t> graph;
    graph.read(argv[1]);
    //--------------------------------------------------------------------------
    HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(),
                           graph.csr_out_edges());
    HornetGPU hornet_gpu(hornet_init);

    std::cout << "------------------------------------------------" << std::endl;

    using namespace batch_gen_property;

    vid_t* batch_src, *batch_dst;
    int batch_size = std::stoi(argv[2]);

    cuMallocHost(batch_src, batch_size);
    cuMallocHost(batch_dst, batch_size);

    generateBatch(graph, batch_size, batch_src, batch_dst, BatchGenType::INSERT);

    gpu::BatchUpdate batch_update(batch_src, batch_dst, batch_size);

    hornet_gpu.reserveBatchOpResource(batch_size);
    printf("ne: %d\n", hornet_gpu.nE());
    std::cout << "=======\n";

    Timer<DEVICE> TM(3);
    TM.start();

    hornet_gpu.insertEdgeBatch(batch_update);

    TM.stop();
    printf("ne: %d\n", hornet_gpu.nE());
    std::cout << "=======\n";

    TM.print("Insertion " + std::to_string(batch_size) + ": ");

    cuFreeHost(batch_src);
    cuFreeHost(batch_dst);
}
5e6ecfb2d8d845e9433be403b2f1d91b63f566c0.cu
#include "Hornet.hpp"
#include "Core/GPUHornet/BatchUpdate.cuh"
#include "Util/BatchFunctions.hpp"
#include <Host/FileUtil.hpp>        //xlib::extract_filepath_noextension
#include <Device/Util/CudaUtil.cuh> //xlib::deviceInfo
#include <algorithm>                //std::generate
#include <chrono>                   //std::chrono
#include <random>                   //std::mt19937_64
#include <cuda_profiler_api.h>

using namespace hornets_nest;
using namespace timer;
using namespace std::string_literals;
using namespace gpu::batch_property;

using HornetGPU = hornets_nest::gpu::Hornet<EMPTY, EMPTY>;

void exec(int argc, char* argv[]);

/**
 * @brief Example tester for Hornet
 */
int main(int argc, char* argv[]) {
    exec(argc, argv);
    cudaDeviceReset();
}

void exec(int argc, char* argv[]) {
    using namespace graph::structure_prop;
    using namespace graph::parsing_prop;

    xlib::device_info();
    graph::GraphStd<vid_t, eoff_t> graph;
    graph.read(argv[1]);
    //--------------------------------------------------------------------------
    HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(),
                           graph.csr_out_edges());
    HornetGPU hornet_gpu(hornet_init);

    std::cout << "------------------------------------------------" << std::endl;

    using namespace batch_gen_property;

    vid_t* batch_src, *batch_dst;
    int batch_size = std::stoi(argv[2]);

    cuMallocHost(batch_src, batch_size);
    cuMallocHost(batch_dst, batch_size);

    generateBatch(graph, batch_size, batch_src, batch_dst, BatchGenType::INSERT);

    gpu::BatchUpdate batch_update(batch_src, batch_dst, batch_size);

    hornet_gpu.reserveBatchOpResource(batch_size);
    printf("ne: %d\n", hornet_gpu.nE());
    std::cout << "=======\n";

    Timer<DEVICE> TM(3);
    TM.start();

    hornet_gpu.insertEdgeBatch(batch_update);

    TM.stop();
    printf("ne: %d\n", hornet_gpu.nE());
    std::cout << "=======\n";

    TM.print("Insertion " + std::to_string(batch_size) + ": ");

    cuFreeHost(batch_src);
    cuFreeHost(batch_dst);
}
9378440df48b56700e54505d21c9500eb88302d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ********************************************************************** ** Copyright (C) 1990, RSA Data Security, Inc. All rights reserved. ** ** ** ** RSA Data Security, Inc. makes no representations concerning ** ** either the merchantability of this software or the suitability ** ** of this software for any particular purpose. It is provided "as ** ** is" without express or implied warranty of any kind. ** ** ** ** These notices must be retained in any copies of any part of this ** ** documentation and/or software. ** ********************************************************************** */ #include "md5CrackerGPU.h" float totalTime; /* typedef a 32 bit type */ typedef unsigned int UINT4; /* Data structure for MD5 (Message Digest) computation */ typedef struct { UINT4 i[2]; /* number of _bits_ handled mod 2^64 */ UINT4 buf[4]; /* scratch buffer */ unsigned char in[64]; /* input buffer */ unsigned char digest[16]; /* actual digest after MD5Final call */ } MD5_CTX; /* F, G and H are basic MD5 functions: selection, majority, parity */ #define F(x, y, z) (((x) & (y)) | ((~x) & (z))) #define G(x, y, z) (((x) & (z)) | ((y) & (~z))) #define H(x, y, z) ((x) ^ (y) ^ (z)) #define I(x, y, z) ((y) ^ ((x) | (~z))) /* ROTATE_LEFT rotates x left n bits */ #define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n)))) /* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */ /* Rotation is separate from addition to prevent recomputation */ #define FF(a, b, c, d, x, s, ac) \ {(a) += F ((b), (c), (d)) + (x) + (UINT4)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define GG(a, b, c, d, x, s, ac) \ {(a) += G ((b), (c), (d)) + (x) + (UINT4)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define HH(a, b, c, d, x, s, ac) \ {(a) += H ((b), (c), (d)) + (x) + (UINT4)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define II(a, b, c, d, x, s, ac) \ {(a) += I ((b), (c), (d)) + (x) + (UINT4)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } __device__ static void Transform (UINT4 * buf, UINT4 * in) { UINT4 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; /* Round 1 */ #define S11 7 #define S12 12 #define S13 17 #define S14 22 FF ( a, b, c, d, in[ 0], S11, 3614090360); /* 1 */ FF ( d, a, b, c, in[ 1], S12, 3905402710); /* 2 */ FF ( c, d, a, b, in[ 2], S13, 606105819); /* 3 */ FF ( b, c, d, a, in[ 3], S14, 3250441966); /* 4 */ FF ( a, b, c, d, in[ 4], S11, 4118548399); /* 5 */ FF ( d, a, b, c, in[ 5], S12, 1200080426); /* 6 */ FF ( c, d, a, b, in[ 6], S13, 2821735955); /* 7 */ FF ( b, c, d, a, in[ 7], S14, 4249261313); /* 8 */ FF ( a, b, c, d, in[ 8], S11, 1770035416); /* 9 */ FF ( d, a, b, c, in[ 9], S12, 2336552879); /* 10 */ FF ( c, d, a, b, in[10], S13, 4294925233); /* 11 */ FF ( b, c, d, a, in[11], S14, 2304563134); /* 12 */ FF ( a, b, c, d, in[12], S11, 1804603682); /* 13 */ FF ( d, a, b, c, in[13], S12, 4254626195); /* 14 */ FF ( c, d, a, b, in[14], S13, 2792965006); /* 15 */ FF ( b, c, d, a, in[15], S14, 1236535329); /* 16 */ /* Round 2 */ #define S21 5 #define S22 9 #define S23 14 #define S24 20 GG ( a, b, c, d, in[ 1], S21, 4129170786); /* 17 */ GG ( d, a, b, c, in[ 6], S22, 3225465664); /* 18 */ GG ( c, d, a, b, in[11], S23, 643717713); /* 19 */ GG ( b, c, d, a, in[ 0], S24, 3921069994); /* 20 */ GG ( a, b, c, d, in[ 5], S21, 3593408605); /* 21 */ GG ( d, a, b, c, in[10], S22, 38016083); /* 22 */ GG ( c, d, a, b, in[15], S23, 3634488961); /* 23 */ GG ( b, c, d, a, in[ 4], S24, 
3889429448); /* 24 */ GG ( a, b, c, d, in[ 9], S21, 568446438); /* 25 */ GG ( d, a, b, c, in[14], S22, 3275163606); /* 26 */ GG ( c, d, a, b, in[ 3], S23, 4107603335); /* 27 */ GG ( b, c, d, a, in[ 8], S24, 1163531501); /* 28 */ GG ( a, b, c, d, in[13], S21, 2850285829); /* 29 */ GG ( d, a, b, c, in[ 2], S22, 4243563512); /* 30 */ GG ( c, d, a, b, in[ 7], S23, 1735328473); /* 31 */ GG ( b, c, d, a, in[12], S24, 2368359562); /* 32 */ /* Round 3 */ #define S31 4 #define S32 11 #define S33 16 #define S34 23 HH ( a, b, c, d, in[ 5], S31, 4294588738); /* 33 */ HH ( d, a, b, c, in[ 8], S32, 2272392833); /* 34 */ HH ( c, d, a, b, in[11], S33, 1839030562); /* 35 */ HH ( b, c, d, a, in[14], S34, 4259657740); /* 36 */ HH ( a, b, c, d, in[ 1], S31, 2763975236); /* 37 */ HH ( d, a, b, c, in[ 4], S32, 1272893353); /* 38 */ HH ( c, d, a, b, in[ 7], S33, 4139469664); /* 39 */ HH ( b, c, d, a, in[10], S34, 3200236656); /* 40 */ HH ( a, b, c, d, in[13], S31, 681279174); /* 41 */ HH ( d, a, b, c, in[ 0], S32, 3936430074); /* 42 */ HH ( c, d, a, b, in[ 3], S33, 3572445317); /* 43 */ HH ( b, c, d, a, in[ 6], S34, 76029189); /* 44 */ HH ( a, b, c, d, in[ 9], S31, 3654602809); /* 45 */ HH ( d, a, b, c, in[12], S32, 3873151461); /* 46 */ HH ( c, d, a, b, in[15], S33, 530742520); /* 47 */ HH ( b, c, d, a, in[ 2], S34, 3299628645); /* 48 */ /* Round 4 */ #define S41 6 #define S42 10 #define S43 15 #define S44 21 II ( a, b, c, d, in[ 0], S41, 4096336452); /* 49 */ II ( d, a, b, c, in[ 7], S42, 1126891415); /* 50 */ II ( c, d, a, b, in[14], S43, 2878612391); /* 51 */ II ( b, c, d, a, in[ 5], S44, 4237533241); /* 52 */ II ( a, b, c, d, in[12], S41, 1700485571); /* 53 */ II ( d, a, b, c, in[ 3], S42, 2399980690); /* 54 */ II ( c, d, a, b, in[10], S43, 4293915773); /* 55 */ II ( b, c, d, a, in[ 1], S44, 2240044497); /* 56 */ II ( a, b, c, d, in[ 8], S41, 1873313359); /* 57 */ II ( d, a, b, c, in[15], S42, 4264355552); /* 58 */ II ( c, d, a, b, in[ 6], S43, 2734768916); /* 59 */ II ( b, c, d, a, in[13], S44, 1309151649); /* 60 */ II ( a, b, c, d, in[ 4], S41, 4149444226); /* 61 */ II ( d, a, b, c, in[11], S42, 3174756917); /* 62 */ II ( c, d, a, b, in[ 2], S43, 718787259); /* 63 */ II ( b, c, d, a, in[ 9], S44, 3951481745); /* 64 */ buf[0] += a; buf[1] += b; buf[2] += c; buf[3] += d; } /************************ CUDA Functions ************************/ __device__ void MD5Init (MD5_CTX * mdContext) { mdContext->i[0] = mdContext->i[1] = (UINT4)0; /* Load magic initialization constants. 
*/ mdContext->buf[0] = (UINT4)0x67452301; mdContext->buf[1] = (UINT4)0xefcdab89; mdContext->buf[2] = (UINT4)0x98badcfe; mdContext->buf[3] = (UINT4)0x10325476; } __device__ void MD5Update (MD5_CTX * mdContext, unsigned char * inBuf, unsigned int inLen) { UINT4 in[16]; int mdi; unsigned int i, ii; /* compute number of bytes mod 64 */ mdi = (int)((mdContext->i[0] >> 3) & 0x3F); /* update number of bits */ if ((mdContext->i[0] + ((UINT4)inLen << 3)) < mdContext->i[0]) mdContext->i[1]++; mdContext->i[0] += ((UINT4)inLen << 3); mdContext->i[1] += ((UINT4)inLen >> 29); while (inLen--) { /* add new character to buffer, increment mdi */ mdContext->in[mdi++] = *inBuf++; /* transform if necessary */ if (mdi == 0x40) { for (i = 0, ii = 0; i < 16; i++, ii += 4) in[i] = (((UINT4)mdContext->in[ii+3]) << 24) | (((UINT4)mdContext->in[ii+2]) << 16) | (((UINT4)mdContext->in[ii+1]) << 8) | ((UINT4)mdContext->in[ii]); Transform (mdContext->buf, in); mdi = 0; } } } __device__ void MD5Final (MD5_CTX * mdContext) { UINT4 in[16]; int mdi; unsigned int i, ii; unsigned int padLen; unsigned char PADDING[64] = { 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; /* save number of bits */ in[14] = mdContext->i[0]; in[15] = mdContext->i[1]; /* compute number of bytes mod 64 */ mdi = (int)((mdContext->i[0] >> 3) & 0x3F); /* pad out to 56 mod 64 */ padLen = (mdi < 56) ? (56 - mdi) : (120 - mdi); MD5Update (mdContext, PADDING, padLen); /* append length in bits and transform */ for (i = 0, ii = 0; i < 14; i++, ii += 4) in[i] = (((UINT4)mdContext->in[ii+3]) << 24) | (((UINT4)mdContext->in[ii+2]) << 16) | (((UINT4)mdContext->in[ii+1]) << 8) | ((UINT4)mdContext->in[ii]); Transform (mdContext->buf, in); /* store buffer in digest */ for (i = 0, ii = 0; i < 4; i++, ii += 4) { mdContext->digest[ii] = (unsigned char)(mdContext->buf[i] & 0xFF); mdContext->digest[ii+1] = (unsigned char)((mdContext->buf[i] >> 8) & 0xFF); mdContext->digest[ii+2] = (unsigned char)((mdContext->buf[i] >> 16) & 0xFF); mdContext->digest[ii+3] = (unsigned char)((mdContext->buf[i] >> 24) & 0xFF); } } /************************ Main Functions ************************/ #include <stdio.h> #include <sys/types.h> #include <time.h> #include <string.h> __device__ static char * getString(int start, int length, char * words){ char out[30]; memset(out, 0, sizeof(out)); //printf("GPU STRING:%s\n",out); for(int i = 0; i < length; ++i) out[i] = words[start+i]; //printf("%s",words[start+i]); //printf("\n"); //printf("Out:%s\n",out); return out; } __global__ static void MDString (char * words,int * hash_found,int * target_hash,int * indexes, int numberOfWords,int * wordLengths) { // Get thread id int idx = blockIdx.x * blockDim.x + threadIdx.x; // Check if MD5 has been cracked if (idx == 0) hash_found[0] = 0; // If hash hasn't been found, get MD5 for next word if (idx < numberOfWords && hash_found[0] != 1){ // Get string for this thread char * inString = getString(indexes[idx],wordLengths[idx],words); // Calculate MD5 MD5_CTX mdContext; unsigned int len = wordLengths[idx]; unsigned char * uInString = reinterpret_cast<unsigned char *>( inString ); MD5Init (&mdContext); MD5Update (&mdContext, uInString, len); MD5Final (&mdContext); // Check if 
MD5's are equal int flag_same = 1; for (int i = 0; i < 16; ++i){ if(mdContext.digest[i] != target_hash[i]){ // MD5 is not equal, change flag status and break cycle flag_same = 0; break; } } // Check if MD5 has been found if (flag_same == 1){ // MD5 cracked // Print word printf("-------------MD5 Cracked!-------------\nWord: %s\n--------------------------------------\n",inString); // Change flag to True and break cycle hash_found[0] = 1; } } } extern "C" int callMD5CUDA(struct deviceInfo * device,char * words, int * target_hash,int * indexes, int numberOfWords,int * wordLengths) { // Start Execution Time int * d_hash_found; hipMalloc((void**) &d_hash_found,sizeof(int)); hipEvent_t start, stop; float elapsedTime; hipEventCreate(&start); hipEventRecord(start, 0); // Call CUDA Function on GPU hipLaunchKernelGGL(( MDString) , dim3(device->max_blocks), dim3(device->max_threads), 0, 0, words, d_hash_found,target_hash,indexes,numberOfWords,wordLengths); // Stop Execution Time hipEventCreate(&stop); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime( &elapsedTime, start, stop); totalTime+=elapsedTime; // Copy flag from GPU to CPU and free memory int * h_hash_found = (int *) malloc(sizeof(int)); hipMemcpy(h_hash_found, d_hash_found, sizeof(int), hipMemcpyDeviceToHost ); hipFree(d_hash_found); int returnFlag = h_hash_found[0]; free(h_hash_found); // Return flag hash found to CPU return returnFlag; }
9378440df48b56700e54505d21c9500eb88302d8.cu
/* ********************************************************************** ** Copyright (C) 1990, RSA Data Security, Inc. All rights reserved. ** ** ** ** RSA Data Security, Inc. makes no representations concerning ** ** either the merchantability of this software or the suitability ** ** of this software for any particular purpose. It is provided "as ** ** is" without express or implied warranty of any kind. ** ** ** ** These notices must be retained in any copies of any part of this ** ** documentation and/or software. ** ********************************************************************** */ #include "md5CrackerGPU.h" float totalTime; /* typedef a 32 bit type */ typedef unsigned int UINT4; /* Data structure for MD5 (Message Digest) computation */ typedef struct { UINT4 i[2]; /* number of _bits_ handled mod 2^64 */ UINT4 buf[4]; /* scratch buffer */ unsigned char in[64]; /* input buffer */ unsigned char digest[16]; /* actual digest after MD5Final call */ } MD5_CTX; /* F, G and H are basic MD5 functions: selection, majority, parity */ #define F(x, y, z) (((x) & (y)) | ((~x) & (z))) #define G(x, y, z) (((x) & (z)) | ((y) & (~z))) #define H(x, y, z) ((x) ^ (y) ^ (z)) #define I(x, y, z) ((y) ^ ((x) | (~z))) /* ROTATE_LEFT rotates x left n bits */ #define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n)))) /* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */ /* Rotation is separate from addition to prevent recomputation */ #define FF(a, b, c, d, x, s, ac) \ {(a) += F ((b), (c), (d)) + (x) + (UINT4)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define GG(a, b, c, d, x, s, ac) \ {(a) += G ((b), (c), (d)) + (x) + (UINT4)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define HH(a, b, c, d, x, s, ac) \ {(a) += H ((b), (c), (d)) + (x) + (UINT4)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define II(a, b, c, d, x, s, ac) \ {(a) += I ((b), (c), (d)) + (x) + (UINT4)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } __device__ static void Transform (UINT4 * buf, UINT4 * in) { UINT4 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; /* Round 1 */ #define S11 7 #define S12 12 #define S13 17 #define S14 22 FF ( a, b, c, d, in[ 0], S11, 3614090360); /* 1 */ FF ( d, a, b, c, in[ 1], S12, 3905402710); /* 2 */ FF ( c, d, a, b, in[ 2], S13, 606105819); /* 3 */ FF ( b, c, d, a, in[ 3], S14, 3250441966); /* 4 */ FF ( a, b, c, d, in[ 4], S11, 4118548399); /* 5 */ FF ( d, a, b, c, in[ 5], S12, 1200080426); /* 6 */ FF ( c, d, a, b, in[ 6], S13, 2821735955); /* 7 */ FF ( b, c, d, a, in[ 7], S14, 4249261313); /* 8 */ FF ( a, b, c, d, in[ 8], S11, 1770035416); /* 9 */ FF ( d, a, b, c, in[ 9], S12, 2336552879); /* 10 */ FF ( c, d, a, b, in[10], S13, 4294925233); /* 11 */ FF ( b, c, d, a, in[11], S14, 2304563134); /* 12 */ FF ( a, b, c, d, in[12], S11, 1804603682); /* 13 */ FF ( d, a, b, c, in[13], S12, 4254626195); /* 14 */ FF ( c, d, a, b, in[14], S13, 2792965006); /* 15 */ FF ( b, c, d, a, in[15], S14, 1236535329); /* 16 */ /* Round 2 */ #define S21 5 #define S22 9 #define S23 14 #define S24 20 GG ( a, b, c, d, in[ 1], S21, 4129170786); /* 17 */ GG ( d, a, b, c, in[ 6], S22, 3225465664); /* 18 */ GG ( c, d, a, b, in[11], S23, 643717713); /* 19 */ GG ( b, c, d, a, in[ 0], S24, 3921069994); /* 20 */ GG ( a, b, c, d, in[ 5], S21, 3593408605); /* 21 */ GG ( d, a, b, c, in[10], S22, 38016083); /* 22 */ GG ( c, d, a, b, in[15], S23, 3634488961); /* 23 */ GG ( b, c, d, a, in[ 4], S24, 3889429448); /* 24 */ GG ( a, b, c, d, in[ 9], S21, 568446438); /* 25 */ GG ( d, a, b, c, 
in[14], S22, 3275163606); /* 26 */ GG ( c, d, a, b, in[ 3], S23, 4107603335); /* 27 */ GG ( b, c, d, a, in[ 8], S24, 1163531501); /* 28 */ GG ( a, b, c, d, in[13], S21, 2850285829); /* 29 */ GG ( d, a, b, c, in[ 2], S22, 4243563512); /* 30 */ GG ( c, d, a, b, in[ 7], S23, 1735328473); /* 31 */ GG ( b, c, d, a, in[12], S24, 2368359562); /* 32 */ /* Round 3 */ #define S31 4 #define S32 11 #define S33 16 #define S34 23 HH ( a, b, c, d, in[ 5], S31, 4294588738); /* 33 */ HH ( d, a, b, c, in[ 8], S32, 2272392833); /* 34 */ HH ( c, d, a, b, in[11], S33, 1839030562); /* 35 */ HH ( b, c, d, a, in[14], S34, 4259657740); /* 36 */ HH ( a, b, c, d, in[ 1], S31, 2763975236); /* 37 */ HH ( d, a, b, c, in[ 4], S32, 1272893353); /* 38 */ HH ( c, d, a, b, in[ 7], S33, 4139469664); /* 39 */ HH ( b, c, d, a, in[10], S34, 3200236656); /* 40 */ HH ( a, b, c, d, in[13], S31, 681279174); /* 41 */ HH ( d, a, b, c, in[ 0], S32, 3936430074); /* 42 */ HH ( c, d, a, b, in[ 3], S33, 3572445317); /* 43 */ HH ( b, c, d, a, in[ 6], S34, 76029189); /* 44 */ HH ( a, b, c, d, in[ 9], S31, 3654602809); /* 45 */ HH ( d, a, b, c, in[12], S32, 3873151461); /* 46 */ HH ( c, d, a, b, in[15], S33, 530742520); /* 47 */ HH ( b, c, d, a, in[ 2], S34, 3299628645); /* 48 */ /* Round 4 */ #define S41 6 #define S42 10 #define S43 15 #define S44 21 II ( a, b, c, d, in[ 0], S41, 4096336452); /* 49 */ II ( d, a, b, c, in[ 7], S42, 1126891415); /* 50 */ II ( c, d, a, b, in[14], S43, 2878612391); /* 51 */ II ( b, c, d, a, in[ 5], S44, 4237533241); /* 52 */ II ( a, b, c, d, in[12], S41, 1700485571); /* 53 */ II ( d, a, b, c, in[ 3], S42, 2399980690); /* 54 */ II ( c, d, a, b, in[10], S43, 4293915773); /* 55 */ II ( b, c, d, a, in[ 1], S44, 2240044497); /* 56 */ II ( a, b, c, d, in[ 8], S41, 1873313359); /* 57 */ II ( d, a, b, c, in[15], S42, 4264355552); /* 58 */ II ( c, d, a, b, in[ 6], S43, 2734768916); /* 59 */ II ( b, c, d, a, in[13], S44, 1309151649); /* 60 */ II ( a, b, c, d, in[ 4], S41, 4149444226); /* 61 */ II ( d, a, b, c, in[11], S42, 3174756917); /* 62 */ II ( c, d, a, b, in[ 2], S43, 718787259); /* 63 */ II ( b, c, d, a, in[ 9], S44, 3951481745); /* 64 */ buf[0] += a; buf[1] += b; buf[2] += c; buf[3] += d; } /************************ CUDA Functions ************************/ __device__ void MD5Init (MD5_CTX * mdContext) { mdContext->i[0] = mdContext->i[1] = (UINT4)0; /* Load magic initialization constants. 
*/ mdContext->buf[0] = (UINT4)0x67452301; mdContext->buf[1] = (UINT4)0xefcdab89; mdContext->buf[2] = (UINT4)0x98badcfe; mdContext->buf[3] = (UINT4)0x10325476; } __device__ void MD5Update (MD5_CTX * mdContext, unsigned char * inBuf, unsigned int inLen) { UINT4 in[16]; int mdi; unsigned int i, ii; /* compute number of bytes mod 64 */ mdi = (int)((mdContext->i[0] >> 3) & 0x3F); /* update number of bits */ if ((mdContext->i[0] + ((UINT4)inLen << 3)) < mdContext->i[0]) mdContext->i[1]++; mdContext->i[0] += ((UINT4)inLen << 3); mdContext->i[1] += ((UINT4)inLen >> 29); while (inLen--) { /* add new character to buffer, increment mdi */ mdContext->in[mdi++] = *inBuf++; /* transform if necessary */ if (mdi == 0x40) { for (i = 0, ii = 0; i < 16; i++, ii += 4) in[i] = (((UINT4)mdContext->in[ii+3]) << 24) | (((UINT4)mdContext->in[ii+2]) << 16) | (((UINT4)mdContext->in[ii+1]) << 8) | ((UINT4)mdContext->in[ii]); Transform (mdContext->buf, in); mdi = 0; } } } __device__ void MD5Final (MD5_CTX * mdContext) { UINT4 in[16]; int mdi; unsigned int i, ii; unsigned int padLen; unsigned char PADDING[64] = { 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; /* save number of bits */ in[14] = mdContext->i[0]; in[15] = mdContext->i[1]; /* compute number of bytes mod 64 */ mdi = (int)((mdContext->i[0] >> 3) & 0x3F); /* pad out to 56 mod 64 */ padLen = (mdi < 56) ? (56 - mdi) : (120 - mdi); MD5Update (mdContext, PADDING, padLen); /* append length in bits and transform */ for (i = 0, ii = 0; i < 14; i++, ii += 4) in[i] = (((UINT4)mdContext->in[ii+3]) << 24) | (((UINT4)mdContext->in[ii+2]) << 16) | (((UINT4)mdContext->in[ii+1]) << 8) | ((UINT4)mdContext->in[ii]); Transform (mdContext->buf, in); /* store buffer in digest */ for (i = 0, ii = 0; i < 4; i++, ii += 4) { mdContext->digest[ii] = (unsigned char)(mdContext->buf[i] & 0xFF); mdContext->digest[ii+1] = (unsigned char)((mdContext->buf[i] >> 8) & 0xFF); mdContext->digest[ii+2] = (unsigned char)((mdContext->buf[i] >> 16) & 0xFF); mdContext->digest[ii+3] = (unsigned char)((mdContext->buf[i] >> 24) & 0xFF); } } /************************ Main Functions ************************/ #include <stdio.h> #include <sys/types.h> #include <time.h> #include <string.h> __device__ static char * getString(int start, int length, char * words){ char out[30]; memset(out, 0, sizeof(out)); //printf("GPU STRING:%s\n",out); for(int i = 0; i < length; ++i) out[i] = words[start+i]; //printf("%s",words[start+i]); //printf("\n"); //printf("Out:%s\n",out); return out; } __global__ static void MDString (char * words,int * hash_found,int * target_hash,int * indexes, int numberOfWords,int * wordLengths) { // Get thread id int idx = blockIdx.x * blockDim.x + threadIdx.x; // Check if MD5 has been cracked if (idx == 0) hash_found[0] = 0; // If hash hasn't been found, get MD5 for next word if (idx < numberOfWords && hash_found[0] != 1){ // Get string for this thread char * inString = getString(indexes[idx],wordLengths[idx],words); // Calculate MD5 MD5_CTX mdContext; unsigned int len = wordLengths[idx]; unsigned char * uInString = reinterpret_cast<unsigned char *>( inString ); MD5Init (&mdContext); MD5Update (&mdContext, uInString, len); MD5Final (&mdContext); // Check if 
MD5's are equal int flag_same = 1; for (int i = 0; i < 16; ++i){ if(mdContext.digest[i] != target_hash[i]){ // MD5 is not equal, change flag status and break cycle flag_same = 0; break; } } // Check if MD5 has been found if (flag_same == 1){ // MD5 cracked // Print word printf("-------------MD5 Cracked!-------------\nWord: %s\n--------------------------------------\n",inString); // Change flag to True and break cycle hash_found[0] = 1; } } } extern "C" int callMD5CUDA(struct deviceInfo * device,char * words, int * target_hash,int * indexes, int numberOfWords,int * wordLengths) { // Start Execution Time int * d_hash_found; cudaMalloc((void**) &d_hash_found,sizeof(int)); cudaEvent_t start, stop; float elapsedTime; cudaEventCreate(&start); cudaEventRecord(start, 0); // Call CUDA Function on GPU MDString <<< device->max_blocks, device->max_threads>>> (words, d_hash_found,target_hash,indexes,numberOfWords,wordLengths); // Stop Execution Time cudaEventCreate(&stop); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime( &elapsedTime, start, stop); totalTime+=elapsedTime; // Copy flag from GPU to CPU and free memory int * h_hash_found = (int *) malloc(sizeof(int)); cudaMemcpy(h_hash_found, d_hash_found, sizeof(int), cudaMemcpyDeviceToHost ); cudaFree(d_hash_found); int returnFlag = h_hash_found[0]; free(h_hash_found); // Return flag hash found to CPU return returnFlag; }
7cb34e97fcce04083a734096f48d3e222bd02963.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/ndarray/ndarray_assign_core.h"
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/kernel/kernel_util.h"

namespace oneflow {

namespace {

template<typename T, int NDIMS>
__global__ void NdarrayAssignGpu(XpuVarNdarray<T> y, const XpuReducedNdarray<T, NDIMS> reduced) {
  NdarrayAssignCore<T, NDIMS>::Assign(y, reduced);
}

}  // namespace

template<typename T, int NDIMS>
struct NdarrayAssignCoreWrapper<DeviceType::kGPU, T, NDIMS> final {
  static void Assign(DeviceCtx* ctx, const XpuVarNdarray<T>& y,
                     const XpuReducedNdarray<T, NDIMS>& reduced) {
    size_t n = y.host_shape().HostElemNum();
    RUN_CUDA_KERNEL((NdarrayAssignGpu<T, NDIMS>), ctx, n, y, reduced);
  }
};

#define INSTANTIATE_NDARRAY_ASSIGN(dtype_pair, NDIMS) \
  template struct NdarrayAssignCoreWrapper<DeviceType::kGPU, OF_PP_PAIR_FIRST(dtype_pair), NDIMS>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_NDARRAY_ASSIGN,
                                 ARITHMETIC_DATA_TYPE_SEQ HALF_DATA_TYPE_SEQ, DIM_SEQ);

}  // namespace oneflow
7cb34e97fcce04083a734096f48d3e222bd02963.cu
/*
Copyright 2020 The OneFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/ndarray/ndarray_assign_core.h"
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/kernel/kernel_util.h"

namespace oneflow {

namespace {

template<typename T, int NDIMS>
__global__ void NdarrayAssignGpu(XpuVarNdarray<T> y, const XpuReducedNdarray<T, NDIMS> reduced) {
  NdarrayAssignCore<T, NDIMS>::Assign(y, reduced);
}

}  // namespace

template<typename T, int NDIMS>
struct NdarrayAssignCoreWrapper<DeviceType::kGPU, T, NDIMS> final {
  static void Assign(DeviceCtx* ctx, const XpuVarNdarray<T>& y,
                     const XpuReducedNdarray<T, NDIMS>& reduced) {
    size_t n = y.host_shape().HostElemNum();
    RUN_CUDA_KERNEL((NdarrayAssignGpu<T, NDIMS>), ctx, n, y, reduced);
  }
};

#define INSTANTIATE_NDARRAY_ASSIGN(dtype_pair, NDIMS) \
  template struct NdarrayAssignCoreWrapper<DeviceType::kGPU, OF_PP_PAIR_FIRST(dtype_pair), NDIMS>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_NDARRAY_ASSIGN,
                                 ARITHMETIC_DATA_TYPE_SEQ HALF_DATA_TYPE_SEQ, DIM_SEQ);

}  // namespace oneflow
e100bb22ecf828510fb13d39fdd07f5f0a591138.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "prod_cuda.cuh"
#include "matrix.hpp"

__global__ void matrix_mult(Matrix::data_t *lhs, Matrix::data_t *rhs, Matrix::data_t *out,
                            size_t M, size_t N, size_t K)
{
    const size_t i = threadIdx.x + blockDim.x * blockIdx.x;
    const size_t j = threadIdx.y + blockDim.y * blockIdx.y;

    Matrix::data_t sum = 0;
    if (i < M && j < K) {
        for (size_t k = 0; k < N; k++) {
            sum += lhs[i * N + k]  // lhs(i, k)
                 * rhs[j * K + k]; // rhs(k, j) => rhs'(j, k)
        }
        out[i * N + j] = sum; // out(i, j)
    }
}
e100bb22ecf828510fb13d39fdd07f5f0a591138.cu
#include "prod_cuda.cuh"
#include "matrix.hpp"

__global__ void matrix_mult(Matrix::data_t *lhs, Matrix::data_t *rhs, Matrix::data_t *out,
                            size_t M, size_t N, size_t K)
{
    const size_t i = threadIdx.x + blockDim.x * blockIdx.x;
    const size_t j = threadIdx.y + blockDim.y * blockIdx.y;

    Matrix::data_t sum = 0;
    if (i < M && j < K) {
        for (size_t k = 0; k < N; k++) {
            sum += lhs[i * N + k]  // lhs(i, k)
                 * rhs[j * K + k]; // rhs(k, j) => rhs'(j, k)
        }
        out[i * N + j] = sum; // out(i, j)
    }
}
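The prod_cuda pair above ships only the kernel; the host-side setup lives elsewhere in that repository and is not part of this row. The sketch below is a hypothetical CUDA launcher for it, assuming Matrix::data_t is float, square n x n operands (the kernel's index arithmetic is only self-consistent when N == K), rhs supplied in the transposed layout its comments describe, and the kernel declaration visible (e.g. via prod_cuda.cuh). The name launch_matrix_mult and the 16x16 block shape are illustrative choices, not taken from the repository.

#include <cuda_runtime.h>
#include <vector>

// Hypothetical host wrapper (not from the repository): multiplies two n x n
// matrices with the matrix_mult kernel above.
void launch_matrix_mult(const std::vector<float>& lhs, const std::vector<float>& rhs_t,
                        std::vector<float>& out, size_t n)
{
    float *d_lhs, *d_rhs, *d_out;
    const size_t bytes = n * n * sizeof(float);
    cudaMalloc(&d_lhs, bytes);
    cudaMalloc(&d_rhs, bytes);
    cudaMalloc(&d_out, bytes);

    cudaMemcpy(d_lhs, lhs.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_rhs, rhs_t.data(), bytes, cudaMemcpyHostToDevice);

    // One thread per output element: x indexes rows (i < M), y indexes columns (j < K).
    dim3 block(16, 16);
    dim3 grid((n + block.x - 1) / block.x, (n + block.y - 1) / block.y);
    matrix_mult<<<grid, block>>>(d_lhs, d_rhs, d_out, n, n, n);

    cudaMemcpy(out.data(), d_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_lhs);
    cudaFree(d_rhs);
    cudaFree(d_out);
}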
c2cd78e467cb68654b4c28eec096c92f44027452.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2019-2020 NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "index_gpu.cuh" #include <thrust/transform_scan.h> namespace claraparabricks { namespace genomeworks { namespace cudamapper { namespace details { namespace index_gpu { void find_first_occurrences_of_representations(DefaultDeviceAllocator allocator, device_buffer<representation_t>& unique_representations_d, device_buffer<std::uint32_t>& first_occurrence_index_d, const device_buffer<representation_t>& input_representations_d, const hipStream_t cuda_stream) { GW_NVTX_RANGE(profiler, "IndexGPU::find_first_occurrences_of_representations"); // TODO: Currently maximum number of thread blocks is 2^31-1. This means we support representations of up to (2^31-1) * number_of_threads // With 256 that's (2^31-1)*2^8 ~= 2^39. If representation is 4-byte (we expect it to be 4 or 8) that's 2^39*2^2 = 2^41 = 2TB. We don't expect to hit this limit any time soon // The kernel can be modified to process several representation per thread to support arbitrary size std::uint32_t number_of_threads = 256; // arbitrary value std::uint32_t number_of_blocks = (input_representations_d.size() - 1) / number_of_threads + 1; // do inclusive scan // for example for // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 // 0 0 0 0 12 12 12 12 12 12 23 23 23 32 32 32 32 32 46 46 46 // gives // 1 1 1 1 2 2 2 2 2 2 3 3 3 4 4 4 4 4 5 5 5 // meaning all elements with the same representation have the same value and those values are sorted in increasing order starting from 1 device_buffer<std::uint64_t> representation_index_mask_d(input_representations_d.size(), allocator, cuda_stream); const std::int64_t number_of_representations = get_size(input_representations_d); const representation_t* const input_representations_d_data = input_representations_d.data(); thrust::transform_inclusive_scan( thrust::hip::par(allocator).on(cuda_stream), thrust::make_counting_iterator(std::int64_t(0)), thrust::make_counting_iterator(number_of_representations), representation_index_mask_d.begin(), [input_representations_d_data] __device__(std::int64_t idx) -> std::uint64_t { if (idx == 0) return 1; return (input_representations_d_data[idx - 1] != input_representations_d_data[idx] ? 
1 : 0); }, thrust::plus<std::uint64_t>()); const std::uint64_t number_of_unique_representations = cudautils::get_value_from_device(representation_index_mask_d.end() - 1, cuda_stream); // D2H copy first_occurrence_index_d.clear_and_resize(number_of_unique_representations + 1); // <- +1 for the additional element unique_representations_d.clear_and_resize(number_of_unique_representations); hipLaunchKernelGGL(( find_first_occurrences_of_representations_kernel), dim3(number_of_blocks), dim3(number_of_threads), 0, cuda_stream, representation_index_mask_d.data(), input_representations_d.data(), representation_index_mask_d.size(), first_occurrence_index_d.data(), unique_representations_d.data()); // last element is the total number of elements in representations array std::uint32_t input_representations_size = input_representations_d.size(); cudautils::set_device_value_async(first_occurrence_index_d.end() - 1, &input_representations_size, cuda_stream); // H2D copy hipStreamSynchronize(cuda_stream); //async H2D copy has to complete before input_representations_size goes out of scope } __global__ void find_first_occurrences_of_representations_kernel(const std::uint64_t* const representation_index_mask_d, const representation_t* const input_representations_d, const std::size_t number_of_input_elements, std::uint32_t* const starting_index_of_each_representation_d, representation_t* const unique_representations_d) { // one thread per element of input_representations_d (i.e. sketch_element) std::uint64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= number_of_input_elements) return; if (index == 0) { starting_index_of_each_representation_d[0] = 0; unique_representations_d[0] = input_representations_d[0]; } else { // representation_index_mask_d gives a unique index to each representation, starting from 1, thus '-1' const auto representation_index_mask_for_this_index = representation_index_mask_d[index]; if (representation_index_mask_for_this_index != representation_index_mask_d[index - 1]) { // if new representation is not the same as its left neighbor // save the index at which that representation starts starting_index_of_each_representation_d[representation_index_mask_for_this_index - 1] = index; unique_representations_d[representation_index_mask_for_this_index - 1] = input_representations_d[index]; } } } __global__ void compress_unique_representations_after_filtering_kernel(const std::uint64_t number_of_unique_representation_before_compression, const representation_t* const unique_representations_before_compression_d, const std::uint32_t* const first_occurrence_of_representation_before_compression_d, const std::uint32_t* const new_unique_representation_index_d, representation_t* const unique_representations_after_compression_d, std::uint32_t* const first_occurrence_of_representation_after_compression_d) { const std::uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= number_of_unique_representation_before_compression + 1) // +1 for the additional element in first_occurrence_of_representation return; if (i == number_of_unique_representation_before_compression) // additional element in first_occurrence_of_representation { first_occurrence_of_representation_after_compression_d[new_unique_representation_index_d[i]] = first_occurrence_of_representation_before_compression_d[i]; } else { // TODO: load these two values into shared memory if (first_occurrence_of_representation_before_compression_d[i] != first_occurrence_of_representation_before_compression_d[i + 1]) // if it's the same 
that means that this representation has been filtered out { const std::uint32_t new_unique_representation_index = new_unique_representation_index_d[i]; unique_representations_after_compression_d[new_unique_representation_index] = unique_representations_before_compression_d[i]; first_occurrence_of_representation_after_compression_d[new_unique_representation_index] = first_occurrence_of_representation_before_compression_d[i]; } } } } // namespace index_gpu } // namespace details } // namespace cudamapper } // namespace genomeworks } // namespace claraparabricks
c2cd78e467cb68654b4c28eec096c92f44027452.cu
/* * Copyright 2019-2020 NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "index_gpu.cuh" #include <thrust/transform_scan.h> namespace claraparabricks { namespace genomeworks { namespace cudamapper { namespace details { namespace index_gpu { void find_first_occurrences_of_representations(DefaultDeviceAllocator allocator, device_buffer<representation_t>& unique_representations_d, device_buffer<std::uint32_t>& first_occurrence_index_d, const device_buffer<representation_t>& input_representations_d, const cudaStream_t cuda_stream) { GW_NVTX_RANGE(profiler, "IndexGPU::find_first_occurrences_of_representations"); // TODO: Currently maximum number of thread blocks is 2^31-1. This means we support representations of up to (2^31-1) * number_of_threads // With 256 that's (2^31-1)*2^8 ~= 2^39. If representation is 4-byte (we expect it to be 4 or 8) that's 2^39*2^2 = 2^41 = 2TB. We don't expect to hit this limit any time soon // The kernel can be modified to process several representation per thread to support arbitrary size std::uint32_t number_of_threads = 256; // arbitrary value std::uint32_t number_of_blocks = (input_representations_d.size() - 1) / number_of_threads + 1; // do inclusive scan // for example for // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 // 0 0 0 0 12 12 12 12 12 12 23 23 23 32 32 32 32 32 46 46 46 // gives // 1 1 1 1 2 2 2 2 2 2 3 3 3 4 4 4 4 4 5 5 5 // meaning all elements with the same representation have the same value and those values are sorted in increasing order starting from 1 device_buffer<std::uint64_t> representation_index_mask_d(input_representations_d.size(), allocator, cuda_stream); const std::int64_t number_of_representations = get_size(input_representations_d); const representation_t* const input_representations_d_data = input_representations_d.data(); thrust::transform_inclusive_scan( thrust::cuda::par(allocator).on(cuda_stream), thrust::make_counting_iterator(std::int64_t(0)), thrust::make_counting_iterator(number_of_representations), representation_index_mask_d.begin(), [input_representations_d_data] __device__(std::int64_t idx) -> std::uint64_t { if (idx == 0) return 1; return (input_representations_d_data[idx - 1] != input_representations_d_data[idx] ? 
1 : 0); }, thrust::plus<std::uint64_t>()); const std::uint64_t number_of_unique_representations = cudautils::get_value_from_device(representation_index_mask_d.end() - 1, cuda_stream); // D2H copy first_occurrence_index_d.clear_and_resize(number_of_unique_representations + 1); // <- +1 for the additional element unique_representations_d.clear_and_resize(number_of_unique_representations); find_first_occurrences_of_representations_kernel<<<number_of_blocks, number_of_threads, 0, cuda_stream>>>(representation_index_mask_d.data(), input_representations_d.data(), representation_index_mask_d.size(), first_occurrence_index_d.data(), unique_representations_d.data()); // last element is the total number of elements in representations array std::uint32_t input_representations_size = input_representations_d.size(); cudautils::set_device_value_async(first_occurrence_index_d.end() - 1, &input_representations_size, cuda_stream); // H2D copy cudaStreamSynchronize(cuda_stream); //async H2D copy has to complete before input_representations_size goes out of scope } __global__ void find_first_occurrences_of_representations_kernel(const std::uint64_t* const representation_index_mask_d, const representation_t* const input_representations_d, const std::size_t number_of_input_elements, std::uint32_t* const starting_index_of_each_representation_d, representation_t* const unique_representations_d) { // one thread per element of input_representations_d (i.e. sketch_element) std::uint64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= number_of_input_elements) return; if (index == 0) { starting_index_of_each_representation_d[0] = 0; unique_representations_d[0] = input_representations_d[0]; } else { // representation_index_mask_d gives a unique index to each representation, starting from 1, thus '-1' const auto representation_index_mask_for_this_index = representation_index_mask_d[index]; if (representation_index_mask_for_this_index != representation_index_mask_d[index - 1]) { // if new representation is not the same as its left neighbor // save the index at which that representation starts starting_index_of_each_representation_d[representation_index_mask_for_this_index - 1] = index; unique_representations_d[representation_index_mask_for_this_index - 1] = input_representations_d[index]; } } } __global__ void compress_unique_representations_after_filtering_kernel(const std::uint64_t number_of_unique_representation_before_compression, const representation_t* const unique_representations_before_compression_d, const std::uint32_t* const first_occurrence_of_representation_before_compression_d, const std::uint32_t* const new_unique_representation_index_d, representation_t* const unique_representations_after_compression_d, std::uint32_t* const first_occurrence_of_representation_after_compression_d) { const std::uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= number_of_unique_representation_before_compression + 1) // +1 for the additional element in first_occurrence_of_representation return; if (i == number_of_unique_representation_before_compression) // additional element in first_occurrence_of_representation { first_occurrence_of_representation_after_compression_d[new_unique_representation_index_d[i]] = first_occurrence_of_representation_before_compression_d[i]; } else { // TODO: load these two values into shared memory if (first_occurrence_of_representation_before_compression_d[i] != first_occurrence_of_representation_before_compression_d[i + 1]) // if it's the same that means that this 
representation has been filtered out { const std::uint32_t new_unique_representation_index = new_unique_representation_index_d[i]; unique_representations_after_compression_d[new_unique_representation_index] = unique_representations_before_compression_d[i]; first_occurrence_of_representation_after_compression_d[new_unique_representation_index] = first_occurrence_of_representation_before_compression_d[i]; } } } } // namespace index_gpu } // namespace details } // namespace cudamapper } // namespace genomeworks } // namespace claraparabricks
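The pair above finds where each unique representation starts in a sorted array by building a 1-based "unique index" mask with thrust::transform_inclusive_scan and then comparing neighbouring mask entries in a kernel. The standalone sketch below reproduces that pattern on plain int data; every name in it (RunStartFunctor, first_occurrence_kernel, the sample values) is illustrative and not part of the genomeworks sources.

// Illustrative sketch only; names, types and sizes are assumptions, not taken
// from the file above.
#include <thrust/device_vector.h>
#include <thrust/transform_scan.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <cstddef>
#include <cstdio>

// Emits 1 whenever element i starts a new run in the sorted input, else 0.
struct RunStartFunctor
{
    const int* vals;
    __device__ unsigned long long operator()(long long i) const
    {
        return (i == 0 || vals[i - 1] != vals[i]) ? 1ULL : 0ULL;
    }
};

__global__ void first_occurrence_kernel(const unsigned long long* mask,
                                        const int* vals,
                                        std::size_t n,
                                        int* first_index,
                                        int* unique_vals)
{
    std::size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    // mask is 1-based after the inclusive scan, hence the "- 1".
    if (i == 0 || mask[i] != mask[i - 1]) {
        first_index[mask[i] - 1] = static_cast<int>(i);
        unique_vals[mask[i] - 1] = vals[i];
    }
}

int main()
{
    const int h_vals[] = {0, 0, 0, 12, 12, 23, 23, 23, 32};
    const std::size_t n = sizeof(h_vals) / sizeof(h_vals[0]);
    thrust::device_vector<int> vals(h_vals, h_vals + n);
    thrust::device_vector<unsigned long long> mask(n);

    RunStartFunctor op{thrust::raw_pointer_cast(vals.data())};
    thrust::transform_inclusive_scan(thrust::counting_iterator<long long>(0),
                                     thrust::counting_iterator<long long>((long long)n),
                                     mask.begin(), op,
                                     thrust::plus<unsigned long long>());

    const unsigned long long n_unique = mask.back();   // one-element D2H copy
    thrust::device_vector<int> first_index(n_unique), unique_vals(n_unique);
    first_occurrence_kernel<<<(unsigned)((n + 255) / 256), 256>>>(
        thrust::raw_pointer_cast(mask.data()),
        thrust::raw_pointer_cast(vals.data()), n,
        thrust::raw_pointer_cast(first_index.data()),
        thrust::raw_pointer_cast(unique_vals.data()));
    cudaDeviceSynchronize();

    for (std::size_t i = 0; i < n_unique; ++i)
        printf("value %d first occurs at index %d\n",
               (int)unique_vals[i], (int)first_index[i]);
    return 0;
}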
32864a20acbc050f38874b3aeb0ea8ca583c7cab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @precisions normal z -> s d c */ #include "magma_internal.h" #include "commonblas_z.h" #include "magma_templates.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /******************************************************************************/ extern "C" __global__ void magma_zgemv_kernel1(int m, const magmaDoubleComplex * __restrict__ V, int ldv, const magmaDoubleComplex * __restrict__ c, magmaDoubleComplex *dwork) { const int i = threadIdx.x; const magmaDoubleComplex *dV = V + (blockIdx.x) * ldv; __shared__ magmaDoubleComplex sum[ BLOCK_SIZE ]; magmaDoubleComplex lsum; /* lsum := v**H * C */ lsum = MAGMA_Z_ZERO; for (int j = i; j < m; j += BLOCK_SIZE) lsum += MAGMA_Z_MUL( MAGMA_Z_CONJ( dV[j] ), c[j] ); sum[i] = lsum; magma_sum_reduce< BLOCK_SIZE >( i, sum ); __syncthreads(); if (i == 0) dwork [blockIdx.x] = sum[0]; } /******************************************************************************/ /* Call magma_zgemv_kernel3<<< n, BLOCK_SIZE, 0, queue->cuda_stream() >>>(m, V, ldv, c, dwork, tau) to compute ZGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1) and to set c[0] to 1. i.e., work = -tau[0] V**H c */ extern "C" __global__ void magma_zgemv_kernel3(int m, const magmaDoubleComplex * __restrict__ V, int ldv, magmaDoubleComplex *c, magmaDoubleComplex *dwork, magmaDoubleComplex *tau) { const int i = threadIdx.x; const magmaDoubleComplex *dV = V + (blockIdx.x) * ldv; __shared__ magmaDoubleComplex sum[ BLOCK_SIZE ]; magmaDoubleComplex lsum; if (i == 0) c[0] = MAGMA_Z_ONE; /* lsum := v**H * C */ lsum = MAGMA_Z_ZERO; for (int j = i; j < m; j += BLOCK_SIZE) lsum += MAGMA_Z_MUL( MAGMA_Z_CONJ( dV[j] ), c[j] ); sum[i] = lsum; magma_sum_reduce< BLOCK_SIZE >( i, sum ); __syncthreads(); if (i == 0) dwork [blockIdx.x] = -tau[0]*sum[0]; } /******************************************************************************/ extern "C" __global__ void magma_zgemv_kernel2(int m, int n, const magmaDoubleComplex * __restrict__ V, int ldv, const magmaDoubleComplex * __restrict__ x, magmaDoubleComplex *c) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; magmaDoubleComplex lsum; V += j; lsum = MAGMA_Z_ZERO; if (j < m) { for (int k=0; k < n; k++) lsum += MAGMA_Z_MUL( V[k*ldv], x[k]); c[j] -= lsum; } } /******************************************************************************/ /* Apply a complex block reflector H to a complex vector C from the left (i.e., C = H C). H is represented in the form H = I - V T V**H where T is the complex k-by-k upper triangular matrix in the representation of the block reflector, and V is a complex block of k elementary reflectors. 
*/ extern "C" void magma_zlarfbx_gpu( magma_int_t m, magma_int_t k, magmaDoubleComplex_ptr V, magma_int_t ldv, magmaDoubleComplex_ptr dT, magma_int_t ldt, magmaDoubleComplex_ptr c, magmaDoubleComplex_ptr dwork, magma_queue_t queue ) { /* dwork = V**H c */ hipLaunchKernelGGL(( magma_zgemv_kernel1) , dim3(k), dim3(BLOCK_SIZE), 0, queue->cuda_stream() , m, V, ldv, c, dwork); /* dwork = T**H dwork */ hipLaunchKernelGGL(( magma_ztrmv_tkernel) , dim3(k), dim3(k), 0, queue->cuda_stream() , dT, ldt, dwork, dwork+k); /* c = c - V dwork */ dim3 blocks3( magma_ceildiv( m, BLOCK_SIZE ) ); dim3 threads3( BLOCK_SIZE ); hipLaunchKernelGGL(( magma_zgemv_kernel2) , dim3(blocks3), dim3(threads3), 0, queue->cuda_stream() , m, k, V, ldv, dwork+k, c); }
32864a20acbc050f38874b3aeb0ea8ca583c7cab.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @precisions normal z -> s d c */ #include "magma_internal.h" #include "commonblas_z.h" #include "magma_templates.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /******************************************************************************/ extern "C" __global__ void magma_zgemv_kernel1(int m, const magmaDoubleComplex * __restrict__ V, int ldv, const magmaDoubleComplex * __restrict__ c, magmaDoubleComplex *dwork) { const int i = threadIdx.x; const magmaDoubleComplex *dV = V + (blockIdx.x) * ldv; __shared__ magmaDoubleComplex sum[ BLOCK_SIZE ]; magmaDoubleComplex lsum; /* lsum := v**H * C */ lsum = MAGMA_Z_ZERO; for (int j = i; j < m; j += BLOCK_SIZE) lsum += MAGMA_Z_MUL( MAGMA_Z_CONJ( dV[j] ), c[j] ); sum[i] = lsum; magma_sum_reduce< BLOCK_SIZE >( i, sum ); __syncthreads(); if (i == 0) dwork [blockIdx.x] = sum[0]; } /******************************************************************************/ /* Call magma_zgemv_kernel3<<< n, BLOCK_SIZE, 0, queue->cuda_stream() >>>(m, V, ldv, c, dwork, tau) to compute ZGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1) and to set c[0] to 1. i.e., work = -tau[0] V**H c */ extern "C" __global__ void magma_zgemv_kernel3(int m, const magmaDoubleComplex * __restrict__ V, int ldv, magmaDoubleComplex *c, magmaDoubleComplex *dwork, magmaDoubleComplex *tau) { const int i = threadIdx.x; const magmaDoubleComplex *dV = V + (blockIdx.x) * ldv; __shared__ magmaDoubleComplex sum[ BLOCK_SIZE ]; magmaDoubleComplex lsum; if (i == 0) c[0] = MAGMA_Z_ONE; /* lsum := v**H * C */ lsum = MAGMA_Z_ZERO; for (int j = i; j < m; j += BLOCK_SIZE) lsum += MAGMA_Z_MUL( MAGMA_Z_CONJ( dV[j] ), c[j] ); sum[i] = lsum; magma_sum_reduce< BLOCK_SIZE >( i, sum ); __syncthreads(); if (i == 0) dwork [blockIdx.x] = -tau[0]*sum[0]; } /******************************************************************************/ extern "C" __global__ void magma_zgemv_kernel2(int m, int n, const magmaDoubleComplex * __restrict__ V, int ldv, const magmaDoubleComplex * __restrict__ x, magmaDoubleComplex *c) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; magmaDoubleComplex lsum; V += j; lsum = MAGMA_Z_ZERO; if (j < m) { for (int k=0; k < n; k++) lsum += MAGMA_Z_MUL( V[k*ldv], x[k]); c[j] -= lsum; } } /******************************************************************************/ /* Apply a complex block reflector H to a complex vector C from the left (i.e., C = H C). H is represented in the form H = I - V T V**H where T is the complex k-by-k upper triangular matrix in the representation of the block reflector, and V is a complex block of k elementary reflectors. */ extern "C" void magma_zlarfbx_gpu( magma_int_t m, magma_int_t k, magmaDoubleComplex_ptr V, magma_int_t ldv, magmaDoubleComplex_ptr dT, magma_int_t ldt, magmaDoubleComplex_ptr c, magmaDoubleComplex_ptr dwork, magma_queue_t queue ) { /* dwork = V**H c */ magma_zgemv_kernel1 <<< k, BLOCK_SIZE, 0, queue->cuda_stream() >>> (m, V, ldv, c, dwork); /* dwork = T**H dwork */ magma_ztrmv_tkernel <<< k, k, 0, queue->cuda_stream() >>> ( dT, ldt, dwork, dwork+k); /* c = c - V dwork */ dim3 blocks3( magma_ceildiv( m, BLOCK_SIZE ) ); dim3 threads3( BLOCK_SIZE ); magma_zgemv_kernel2 <<< blocks3, threads3, 0, queue->cuda_stream() >>> ( m, k, V, ldv, dwork+k, c); }
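magma_zgemv_kernel1 above assigns one thread block per column of V and reduces v**H * c in shared memory via magma_sum_reduce. The sketch below shows the same one-block-per-column pattern for real single precision, with the tree reduction written out explicitly; the names and matrix sizes are illustrative, not MAGMA's.

// Illustrative sketch only; not MAGMA code.
#include <cstdio>
#include <cuda_runtime.h>

#define BLOCK 256   // power of two, as required by the halving reduction below

__global__ void colwise_dot(int m, const float* V, int ldv,
                            const float* c, float* dwork)
{
    const int tid = threadIdx.x;
    const float* col = V + blockIdx.x * (size_t)ldv;   // this block's column

    __shared__ float sum[BLOCK];
    float acc = 0.f;
    for (int j = tid; j < m; j += BLOCK)               // strided partial sums
        acc += col[j] * c[j];
    sum[tid] = acc;
    __syncthreads();

    // Tree reduction in shared memory.
    for (int s = BLOCK / 2; s > 0; s >>= 1) {
        if (tid < s) sum[tid] += sum[tid + s];
        __syncthreads();
    }
    if (tid == 0) dwork[blockIdx.x] = sum[0];
}

int main()
{
    const int m = 1000, n = 4, ldv = m;
    float *V, *c, *dwork;
    cudaMallocManaged(&V, sizeof(float) * (size_t)ldv * n);
    cudaMallocManaged(&c, sizeof(float) * m);
    cudaMallocManaged(&dwork, sizeof(float) * n);
    for (int j = 0; j < n; ++j)
        for (int i = 0; i < m; ++i) V[j * ldv + i] = 1.0f;   // each dot == m
    for (int i = 0; i < m; ++i) c[i] = 1.0f;

    colwise_dot<<<n, BLOCK>>>(m, V, ldv, c, dwork);   // one block per column
    cudaDeviceSynchronize();
    for (int j = 0; j < n; ++j) printf("dwork[%d] = %f\n", j, dwork[j]);

    cudaFree(V); cudaFree(c); cudaFree(dwork);
    return 0;
}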
6e512296bebf30ed90724c29e11c308368773adc.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <iostream> #include <vector> using namespace std; __global__ void sumReduction(float* v, int size, int jump) { // linear id unsigned int t = threadIdx.x; unsigned int t0 = blockIdx.x*blockDim.x; unsigned int k = jump*(t0 + t); // load vector into shared memory extern __shared__ float vs[]; vs[t] = v[k]; for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); if(t % (2*stride) == 0) vs[t] += vs[t + stride]; } if (t == 0) v[jump*t0] = vs[0]; } int main(int argc, char* argv[]) { // Query GPU properties hipDeviceProp_t dev_prop; hipGetDeviceProperties(&dev_prop, 0); cout << "---------------------------------------------" << endl; cout << " GPU PROPERTIES " << endl; cout << "---------------------------------------------" << endl; cout << "Device Name: " << dev_prop.name << endl; cout << "Memory Clock Rate: " << dev_prop.memoryClockRate/1.0e6 << " GHz" << endl; cout << "Memory Bandwidth: " << 2.0*dev_prop.memoryClockRate*(dev_prop.memoryBusWidth/8)/1.0e6 << " GB/s" << endl; cout << "Number of SM: " << dev_prop.multiProcessorCount << endl; cout << "Max Threads per SM: " << dev_prop.maxThreadsPerMultiProcessor << endl; cout << "Registers per Block: " << dev_prop.regsPerBlock << endl; cout << "Shared Memory per Block: " << dev_prop.sharedMemPerBlock << " B" << endl; cout << "Total Global Memory per Block: " << dev_prop.totalGlobalMem/1.0e9 << " GB" << endl; cout << endl; int size = atoi(argv[1]); // creating vector on host side vector<float> vec(size, 1.0f); // Copy vector on device side float* d_vec; hipMalloc((void**)&d_vec, size*sizeof(float)); hipMemcpy((void*)d_vec, (void*)vec.data(), size*sizeof(float), hipMemcpyHostToDevice); // call Kernel int blockDim = 4; int jump = 1; int number_of_blocks = size; do { number_of_blocks = ceil(number_of_blocks/(float)blockDim); hipLaunchKernelGGL(( sumReduction), dim3(number_of_blocks), dim3(blockDim), blockDim*sizeof(float), 0, d_vec, size, jump); jump *= 4; } while (number_of_blocks != 1); // Recover vector from device to host hipMemcpy((void*)vec.data(), (void*)d_vec, size*sizeof(float), hipMemcpyDeviceToHost); // Check results if (fabs(vec[0] - size) > 0.0001f) cout << "ERROR: something is not right." << endl; // Finalize storage hipFree(d_vec); cout << "Closing..." << endl; return 0; }
6e512296bebf30ed90724c29e11c308368773adc.cu
#include <cuda.h> #include <iostream> #include <vector> using namespace std; __global__ void sumReduction(float* v, int size, int jump) { // linear id unsigned int t = threadIdx.x; unsigned int t0 = blockIdx.x*blockDim.x; unsigned int k = jump*(t0 + t); // load vector into shared memory extern __shared__ float vs[]; vs[t] = v[k]; for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); if(t % (2*stride) == 0) vs[t] += vs[t + stride]; } if (t == 0) v[jump*t0] = vs[0]; } int main(int argc, char* argv[]) { // Query GPU properties cudaDeviceProp dev_prop; cudaGetDeviceProperties(&dev_prop, 0); cout << "---------------------------------------------" << endl; cout << " GPU PROPERTIES " << endl; cout << "---------------------------------------------" << endl; cout << "Device Name: " << dev_prop.name << endl; cout << "Memory Clock Rate: " << dev_prop.memoryClockRate/1.0e6 << " GHz" << endl; cout << "Memory Bandwidth: " << 2.0*dev_prop.memoryClockRate*(dev_prop.memoryBusWidth/8)/1.0e6 << " GB/s" << endl; cout << "Number of SM: " << dev_prop.multiProcessorCount << endl; cout << "Max Threads per SM: " << dev_prop.maxThreadsPerMultiProcessor << endl; cout << "Registers per Block: " << dev_prop.regsPerBlock << endl; cout << "Shared Memory per Block: " << dev_prop.sharedMemPerBlock << " B" << endl; cout << "Total Global Memory per Block: " << dev_prop.totalGlobalMem/1.0e9 << " GB" << endl; cout << endl; int size = atoi(argv[1]); // creating vector on host side vector<float> vec(size, 1.0f); // Copy vector on device side float* d_vec; cudaMalloc((void**)&d_vec, size*sizeof(float)); cudaMemcpy((void*)d_vec, (void*)vec.data(), size*sizeof(float), cudaMemcpyHostToDevice); // call Kernel int blockDim = 4; int jump = 1; int number_of_blocks = size; do { number_of_blocks = ceil(number_of_blocks/(float)blockDim); sumReduction<<<number_of_blocks, blockDim, blockDim*sizeof(float)>>>(d_vec, size, jump); jump *= 4; } while (number_of_blocks != 1); // Recover vector from device to host cudaMemcpy((void*)vec.data(), (void*)d_vec, size*sizeof(float), cudaMemcpyDeviceToHost); // Check results if (fabs(vec[0] - size) > 0.0001f) cout << "ERROR: something is not right." << endl; // Finalize storage cudaFree(d_vec); cout << "Closing..." << endl; return 0; }
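The sumReduction kernel above uses interleaved indexing ("t % (2*stride) == 0"), which scatters the active threads across warps as the stride grows. A commonly used alternative is sequential addressing, sketched below under the assumption that blockDim.x is a power of two (the host code launches with blockDim = 4); zero-padding out-of-range elements also guards the last, partially filled block when size is not a multiple of the block count.

// Illustrative variant only; not a drop-in claim about the original program.
__global__ void sumReductionSeq(float* v, int size, int jump)
{
    unsigned int t  = threadIdx.x;
    unsigned int t0 = blockIdx.x * blockDim.x;
    unsigned int k  = jump * (t0 + t);

    extern __shared__ float vs[];
    vs[t] = (k < (unsigned int)size) ? v[k] : 0.0f;   // pad the tail with zeros
    __syncthreads();

    // Sequential addressing: active threads stay contiguous, so warps retire
    // early and shared-memory accesses avoid bank conflicts.
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (t < stride)
            vs[t] += vs[t + stride];
        __syncthreads();
    }
    if (t == 0) v[jump * t0] = vs[0];
}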
e473b59f9e8cfebfbd3a28c946cd26921a4e9892.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe/device.hpp" #include "caffe/util/math_functions.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { #ifdef USE_ROCM template <typename Dtype> __global__ void AdamUpdate(int N, Dtype* g, Dtype* m, Dtype* v, Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) { CUDA_KERNEL_LOOP(i, N) { float gi = g[i]; float mi = m[i] = m[i]*beta1 + gi*(1-beta1); float vi = v[i] = v[i]*beta2 + gi*gi*(1-beta2); g[i] = corrected_local_rate * mi / (sqrt(vi) + eps_hat); } } #endif template<typename Dtype> void adam_update_gpu(device* dev, int_tp N, Dtype* g, Dtype* m, Dtype* v, Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) { if (dev->backend() == BACKEND_CUDA) { #ifdef USE_ROCM AdamUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS) ( N, g, m, v, beta1, beta2, eps_hat, corrected_local_rate); CUDA_POST_KERNEL_CHECK; #endif // USE_ROCM } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context(dev->id()); viennacl::ocl::program &program = dev->program(); viennacl::ocl::kernel &oclk_adam_update = program.get_kernel( CL_KERNEL_SELECT("adam_update")); viennacl::ocl::enqueue( oclk_adam_update(N, WrapHandle((cl_mem) g, &ctx), WrapHandle((cl_mem) m, &ctx), WrapHandle((cl_mem) v, &ctx), fixup_arg_type(beta1), fixup_arg_type(beta2), fixup_arg_type(eps_hat), fixup_arg_type(corrected_local_rate)), ctx.get_queue()); #endif // USE_GREENTEA } } #ifdef HAS_HALF_SUPPORT template void adam_update_gpu<half>(device*, int_tp, half*, half*, half*, half, half, half, half); #endif template void adam_update_gpu<float>(device*, int_tp, float*, float*, float*, float, float, float, float); template void adam_update_gpu<double>(device*, int_tp, double*, double*, double*, double, double, double, double); } // namespace caffe
e473b59f9e8cfebfbd3a28c946cd26921a4e9892.cu
#include "caffe/device.hpp" #include "caffe/util/math_functions.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { #ifdef USE_CUDA template <typename Dtype> __global__ void AdamUpdate(int N, Dtype* g, Dtype* m, Dtype* v, Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) { CUDA_KERNEL_LOOP(i, N) { float gi = g[i]; float mi = m[i] = m[i]*beta1 + gi*(1-beta1); float vi = v[i] = v[i]*beta2 + gi*gi*(1-beta2); g[i] = corrected_local_rate * mi / (sqrt(vi) + eps_hat); } } #endif template<typename Dtype> void adam_update_gpu(device* dev, int_tp N, Dtype* g, Dtype* m, Dtype* v, Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) { if (dev->backend() == BACKEND_CUDA) { #ifdef USE_CUDA AdamUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS) ( N, g, m, v, beta1, beta2, eps_hat, corrected_local_rate); CUDA_POST_KERNEL_CHECK; #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context(dev->id()); viennacl::ocl::program &program = dev->program(); viennacl::ocl::kernel &oclk_adam_update = program.get_kernel( CL_KERNEL_SELECT("adam_update")); viennacl::ocl::enqueue( oclk_adam_update(N, WrapHandle((cl_mem) g, &ctx), WrapHandle((cl_mem) m, &ctx), WrapHandle((cl_mem) v, &ctx), fixup_arg_type(beta1), fixup_arg_type(beta2), fixup_arg_type(eps_hat), fixup_arg_type(corrected_local_rate)), ctx.get_queue()); #endif // USE_GREENTEA } } #ifdef HAS_HALF_SUPPORT template void adam_update_gpu<half>(device*, int_tp, half*, half*, half*, half, half, half, half); #endif template void adam_update_gpu<float>(device*, int_tp, float*, float*, float*, float, float, float, float); template void adam_update_gpu<double>(device*, int_tp, double*, double*, double*, double, double, double, double); } // namespace caffe
292b51b3554b34b9bb1e805724c79841cb07df22.hip
// !!! This is a file automatically generated by hipify!!! /* Authors - Dibyadarshan Hota 16CO154 - Omkar Prabhu 16CO233 */ #include <iostream> #include <stdio.h> #include <sstream> #include <string.h> #include <hip/hip_runtime.h> #define ll long long using namespace std; // ============== Kernel for betweenness calculation ======================== __global__ void betweenness_centrality_kernel (int nodes, int *C, int *R, int *d, int *sigma, float *delta, float *bc, int *reverse_stack) { // Used to store the position where nodes are pushed as a stack __shared__ int position; // Used to store the source vertex __shared__ int s; //__shared__ int end_pos; int idx = threadIdx.x; if (idx == 0) { // Initializing source s = 0; //end_pos = 1; //reverse_bfs_limit[0] = 0; } __syncthreads(); while (s < nodes) { __syncthreads(); // ============== Vertex parallel method for BFS ======================== //Initialize d and sigma for(int v=idx; v<nodes; v+=blockDim.x) { if(v == s) { d[v] = 0; sigma[v] = 1; } else { d[v] = INT_MAX; sigma[v] = 0; } delta[v] = 0; } __syncthreads(); __shared__ int current_depth; __shared__ bool done; // ============== INIT ======================== if(idx == 0) { done = false; current_depth = 0; position = 0; } __syncthreads(); // SP Calc while(!done) { __syncthreads(); done = true; __syncthreads(); for(int v=idx; v<nodes; v+=blockDim.x) { if(d[v] == current_depth) { // ============== Storing nodes for reverse BFS ======================== int t = atomicAdd(&position,1); reverse_stack[t] = v; // ============== Relaxation step to find minimum distance ======================== for(int r=R[v]; r<R[v+1]; r++) { int w = C[r]; if(d[w] == INT_MAX) { d[w] = d[v] + 1; done = false; } if(d[w] == (d[v] + 1)) { atomicAdd(&sigma[w],sigma[v]); } } } } __syncthreads(); if(idx == 0){ current_depth++; //reverse_bfs_limit[end_pos] = position; //++end_pos; } } // Parallel Vertex Parallel implementation (uncomment the following lines and comment the ones below) __syncthreads(); // atomicSub(&end_pos,2); // for(int itr1 = end_pos; itr1 >= 0; --itr1){ // for(int itr2 = reverse_bfs_limit[itr1] + idx; itr2 < reverse_bfs_limit[itr1+1]; itr2+=blockDim.x){ // // reverse_stack[itr2] is one node // for(int itr3 = R[reverse_stack[itr2]]; itr3 < R[reverse_stack[itr2] + 1]; ++itr3){ // int consider = C[itr3]; // // C[itr3] other node // if(d[consider] == d[reverse_stack[itr2]]-1){ // delta[consider] += ( ((float)sigma[consider]/sigma[reverse_stack[itr2]]) * ((float)1 + delta[reverse_stack[itr2]]) ); // } // } // if(reverse_stack[itr2] != s){ // bc[reverse_stack[itr2]] += delta[reverse_stack[itr2]]; // } // } // __syncthreads(); // } // Serialized Vertex Parallel implementation. 
Comment the following for parallel implementation if(idx == 0){ for(int itr1 = nodes - 1; itr1 >= 0; --itr1){ for(int itr2 = R[reverse_stack[itr1]]; itr2 < R[reverse_stack[itr1] + 1]; ++itr2){ int consider = C[itr2]; if(d[consider] == d[reverse_stack[itr1]]-1){ delta[consider] += ( ((float)sigma[consider]/sigma[reverse_stack[itr1]]) * ((float)1 + delta[reverse_stack[itr1]]) ); } } if(reverse_stack[itr1] != s){ bc[reverse_stack[itr1]] += delta[reverse_stack[itr1]]; } } } // ============== Incrementing source ======================== __syncthreads(); if (idx == 0) { s += 1; } } } int main () { // Uncomment for reading files in stdin // freopen("graph", "r", stdin); // ============== INIT ======================== // nodes and edges int nodes, edges; cin>>nodes>>edges; // compressed adjancency list int * V = new int[nodes + 1]; int * E = new int[2 * edges]; // ============== Formation of compressed adjacency for CSR ======================== string line; int node = 0; int counter = 0; getline(cin, line); for (int i = 0; i < nodes; ++i) { getline(cin, line); V[node] = counter; istringstream is(line); int tmp; while (is >> tmp) { E[counter] = tmp; counter += 1; } ++node; } V[node] = counter; // Uncomment for printing compressed adjacency list // cout<<"\n"; // for (int i = 0; i <= nodes; i++) { // cout<<V[i]<<" "; // } // cout<<"\n"; // for (int i = 0; i < 2 * edges; ++i) { // cout<<E[i]<<" "; // } // cout<<"\n"; // Initializations int *d = new int[nodes]; int *sigma = new int[nodes]; float *delta = new float[nodes]; float *bc = new float[nodes]; memset(bc,0,sizeof(bc)); int *d_d, *d_sigma, *d_V, *d_E, *d_reverse_stack; float *d_delta, *d_bc; // Allocating memory via cudamalloc hipMalloc((void**)&d_d, sizeof(int) * nodes); // hipMalloc((void**)&d_end_point, sizeof(int) * (nodes + 1)); hipMalloc((void**)&d_sigma, sizeof(int) * nodes); hipMalloc((void**)&d_reverse_stack, sizeof(int) * nodes); hipMalloc((void**)&d_V, sizeof(int) * (nodes + 1)); hipMalloc((void**)&d_E, sizeof(int) * (2*edges)); hipMalloc((void**)&d_delta, sizeof(float) * nodes); hipMalloc((void**)&d_bc, sizeof(float) * nodes); hipMemcpy(d_V, V, sizeof(int) * (nodes+1), hipMemcpyHostToDevice); hipMemcpy(d_E, E, sizeof(int) * (2*edges), hipMemcpyHostToDevice); hipMemcpy(d_bc, bc, sizeof(float) * (nodes), hipMemcpyHostToDevice); // hipMemcpy(d_delta, delta, sizeof(float) * (nodes), hipMemcpyHostToDevice); // ============== Kernel call ======================== hipLaunchKernelGGL(( betweenness_centrality_kernel) , dim3(1), dim3(256), 0, 0, nodes, d_E, d_V, d_d, d_sigma, d_delta, d_bc, d_reverse_stack); // hipMemcpy(d, d_d, sizeof(float) * nodes, hipMemcpyDeviceToHost); // hipMemcpy(sigma, d_sigma, sizeof(float) * nodes, hipMemcpyDeviceToHost); hipMemcpy(bc, d_bc, sizeof(float) * nodes, hipMemcpyDeviceToHost); // hipMemcpy(delta, d_delta, sizeof(float) * nodes, hipMemcpyDeviceToHost); cout<<"Res: \n"; for (int i = 0; i < nodes; i++) { printf("%f ", bc[i]/2.0); // cout<<bc[i]; } cout<<endl; // ============== Deallocating memory ======================== hipFree(d_sigma); hipFree(d_d); hipFree(d_V); hipFree(d_E); hipFree(d_delta); hipFree(d_bc); hipFree(d_reverse_stack); // hipFree(d_end_point); free(E); free(V); free(d); free(sigma); free(delta); free(bc); return 0; }
292b51b3554b34b9bb1e805724c79841cb07df22.cu
/* Authors - Dibyadarshan Hota 16CO154 - Omkar Prabhu 16CO233 */ #include <iostream> #include <stdio.h> #include <sstream> #include <string.h> #include <cuda.h> #define ll long long using namespace std; // ============== Kernel for betweenness calculation ======================== __global__ void betweenness_centrality_kernel (int nodes, int *C, int *R, int *d, int *sigma, float *delta, float *bc, int *reverse_stack) { // Used to store the position where nodes are pushed as a stack __shared__ int position; // Used to store the source vertex __shared__ int s; //__shared__ int end_pos; int idx = threadIdx.x; if (idx == 0) { // Initializing source s = 0; //end_pos = 1; //reverse_bfs_limit[0] = 0; } __syncthreads(); while (s < nodes) { __syncthreads(); // ============== Vertex parallel method for BFS ======================== //Initialize d and sigma for(int v=idx; v<nodes; v+=blockDim.x) { if(v == s) { d[v] = 0; sigma[v] = 1; } else { d[v] = INT_MAX; sigma[v] = 0; } delta[v] = 0; } __syncthreads(); __shared__ int current_depth; __shared__ bool done; // ============== INIT ======================== if(idx == 0) { done = false; current_depth = 0; position = 0; } __syncthreads(); // SP Calc while(!done) { __syncthreads(); done = true; __syncthreads(); for(int v=idx; v<nodes; v+=blockDim.x) { if(d[v] == current_depth) { // ============== Storing nodes for reverse BFS ======================== int t = atomicAdd(&position,1); reverse_stack[t] = v; // ============== Relaxation step to find minimum distance ======================== for(int r=R[v]; r<R[v+1]; r++) { int w = C[r]; if(d[w] == INT_MAX) { d[w] = d[v] + 1; done = false; } if(d[w] == (d[v] + 1)) { atomicAdd(&sigma[w],sigma[v]); } } } } __syncthreads(); if(idx == 0){ current_depth++; //reverse_bfs_limit[end_pos] = position; //++end_pos; } } // Parallel Vertex Parallel implementation (uncomment the following lines and comment the ones below) __syncthreads(); // atomicSub(&end_pos,2); // for(int itr1 = end_pos; itr1 >= 0; --itr1){ // for(int itr2 = reverse_bfs_limit[itr1] + idx; itr2 < reverse_bfs_limit[itr1+1]; itr2+=blockDim.x){ // // reverse_stack[itr2] is one node // for(int itr3 = R[reverse_stack[itr2]]; itr3 < R[reverse_stack[itr2] + 1]; ++itr3){ // int consider = C[itr3]; // // C[itr3] other node // if(d[consider] == d[reverse_stack[itr2]]-1){ // delta[consider] += ( ((float)sigma[consider]/sigma[reverse_stack[itr2]]) * ((float)1 + delta[reverse_stack[itr2]]) ); // } // } // if(reverse_stack[itr2] != s){ // bc[reverse_stack[itr2]] += delta[reverse_stack[itr2]]; // } // } // __syncthreads(); // } // Serialized Vertex Parallel implementation. 
Comment the following for parallel implementation if(idx == 0){ for(int itr1 = nodes - 1; itr1 >= 0; --itr1){ for(int itr2 = R[reverse_stack[itr1]]; itr2 < R[reverse_stack[itr1] + 1]; ++itr2){ int consider = C[itr2]; if(d[consider] == d[reverse_stack[itr1]]-1){ delta[consider] += ( ((float)sigma[consider]/sigma[reverse_stack[itr1]]) * ((float)1 + delta[reverse_stack[itr1]]) ); } } if(reverse_stack[itr1] != s){ bc[reverse_stack[itr1]] += delta[reverse_stack[itr1]]; } } } // ============== Incrementing source ======================== __syncthreads(); if (idx == 0) { s += 1; } } } int main () { // Uncomment for reading files in stdin // freopen("graph", "r", stdin); // ============== INIT ======================== // nodes and edges int nodes, edges; cin>>nodes>>edges; // compressed adjancency list int * V = new int[nodes + 1]; int * E = new int[2 * edges]; // ============== Formation of compressed adjacency for CSR ======================== string line; int node = 0; int counter = 0; getline(cin, line); for (int i = 0; i < nodes; ++i) { getline(cin, line); V[node] = counter; istringstream is(line); int tmp; while (is >> tmp) { E[counter] = tmp; counter += 1; } ++node; } V[node] = counter; // Uncomment for printing compressed adjacency list // cout<<"\n"; // for (int i = 0; i <= nodes; i++) { // cout<<V[i]<<" "; // } // cout<<"\n"; // for (int i = 0; i < 2 * edges; ++i) { // cout<<E[i]<<" "; // } // cout<<"\n"; // Initializations int *d = new int[nodes]; int *sigma = new int[nodes]; float *delta = new float[nodes]; float *bc = new float[nodes]; memset(bc,0,sizeof(bc)); int *d_d, *d_sigma, *d_V, *d_E, *d_reverse_stack; float *d_delta, *d_bc; // Allocating memory via cudamalloc cudaMalloc((void**)&d_d, sizeof(int) * nodes); // cudaMalloc((void**)&d_end_point, sizeof(int) * (nodes + 1)); cudaMalloc((void**)&d_sigma, sizeof(int) * nodes); cudaMalloc((void**)&d_reverse_stack, sizeof(int) * nodes); cudaMalloc((void**)&d_V, sizeof(int) * (nodes + 1)); cudaMalloc((void**)&d_E, sizeof(int) * (2*edges)); cudaMalloc((void**)&d_delta, sizeof(float) * nodes); cudaMalloc((void**)&d_bc, sizeof(float) * nodes); cudaMemcpy(d_V, V, sizeof(int) * (nodes+1), cudaMemcpyHostToDevice); cudaMemcpy(d_E, E, sizeof(int) * (2*edges), cudaMemcpyHostToDevice); cudaMemcpy(d_bc, bc, sizeof(float) * (nodes), cudaMemcpyHostToDevice); // cudaMemcpy(d_delta, delta, sizeof(float) * (nodes), cudaMemcpyHostToDevice); // ============== Kernel call ======================== betweenness_centrality_kernel <<<1, 256>>> (nodes, d_E, d_V, d_d, d_sigma, d_delta, d_bc, d_reverse_stack); // cudaMemcpy(d, d_d, sizeof(float) * nodes, cudaMemcpyDeviceToHost); // cudaMemcpy(sigma, d_sigma, sizeof(float) * nodes, cudaMemcpyDeviceToHost); cudaMemcpy(bc, d_bc, sizeof(float) * nodes, cudaMemcpyDeviceToHost); // cudaMemcpy(delta, d_delta, sizeof(float) * nodes, cudaMemcpyDeviceToHost); cout<<"Res: \n"; for (int i = 0; i < nodes; i++) { printf("%f ", bc[i]/2.0); // cout<<bc[i]; } cout<<endl; // ============== Deallocating memory ======================== cudaFree(d_sigma); cudaFree(d_d); cudaFree(d_V); cudaFree(d_E); cudaFree(d_delta); cudaFree(d_bc); cudaFree(d_reverse_stack); // cudaFree(d_end_point); free(E); free(V); free(d); free(sigma); free(delta); free(bc); return 0; }
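When checking the GPU bc[] output of the kernel above, a host-side reference is useful (note, incidentally, that the host code clears bc with memset(bc, 0, sizeof(bc)), which zeroes only sizeof(float*) bytes rather than nodes * sizeof(float)). Below is a compact CPU version of Brandes' algorithm over the same CSR arrays V (row offsets) and E (adjacency); it is a verification sketch only, not part of the original sources, and assumes the CSR stores both directions of each undirected edge as the host code does.

// Illustrative sketch only; for result checking on the host.
#include <vector>
#include <queue>
#include <cstdio>

std::vector<float> brandes_cpu(int nodes, const int* V, const int* E)
{
    std::vector<float> bc(nodes, 0.0f);
    for (int s = 0; s < nodes; ++s) {
        std::vector<int> d(nodes, -1), sigma(nodes, 0), order;
        std::vector<float> delta(nodes, 0.0f);
        std::queue<int> q;
        d[s] = 0; sigma[s] = 1; q.push(s);
        while (!q.empty()) {                            // BFS from source s
            int v = q.front(); q.pop();
            order.push_back(v);
            for (int e = V[v]; e < V[v + 1]; ++e) {
                int w = E[e];
                if (d[w] < 0) { d[w] = d[v] + 1; q.push(w); }
                if (d[w] == d[v] + 1) sigma[w] += sigma[v];
            }
        }
        // Dependency accumulation in reverse BFS order.
        for (int i = (int)order.size() - 1; i >= 0; --i) {
            int w = order[i];
            for (int e = V[w]; e < V[w + 1]; ++e) {
                int v = E[e];
                if (d[v] == d[w] - 1)
                    delta[v] += ((float)sigma[v] / sigma[w]) * (1.0f + delta[w]);
            }
            if (w != s) bc[w] += delta[w];
        }
    }
    return bc;   // divide by 2 for undirected graphs, as in the printout above
}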
5bd4e7e5a239a909061d0b7682bbab9c1933705e.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) Microsoft Corporation. // SPDX-License-Identifier: Apache-2.0 // DeepSpeed Team #include "cublas_wrappers.h" #ifdef __HIP_PLATFORM_HCC__ int cublas_gemm_ex(rocblas_handle handle, rocblas_operation transa, rocblas_operation transb, int m, int n, int k, const float* alpha, const float* beta, const float* A, const float* B, float* C, rocblas_gemm_algo algo) #else int cublas_gemm_ex(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, const float* alpha, const float* beta, const float* A, const float* B, float* C, hipblasGemmAlgo_t algo) #endif { #ifdef __HIP_PLATFORM_HCC__ rocblas_status status = rocblas_gemm_ex(handle, transa, transb, m, n, k, (const void*)alpha, (const void*)A, rocblas_datatype_f32_r, (transa == rocblas_operation_none) ? m : k, (const void*)B, rocblas_datatype_f32_r, (transb == rocblas_operation_none) ? k : n, (const void*)beta, C, rocblas_datatype_f32_r, m, C, rocblas_datatype_f32_r, m, rocblas_datatype_f32_r, algo, 0, 0); #else hipblasStatus_t status = hipblasGemmEx(handle, transa, transb, m, n, k, (const void*)alpha, (const void*)A, HIP_R_32F, (transa == HIPBLAS_OP_N) ? m : k, (const void*)B, HIP_R_32F, (transb == HIPBLAS_OP_N) ? k : n, (const void*)beta, C, HIP_R_32F, m, HIP_R_32F, algo); #endif #ifdef __HIP_PLATFORM_HCC__ if (status != rocblas_status_success) { #else if (status != HIPBLAS_STATUS_SUCCESS) { #endif fprintf(stderr, "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", m, n, k, (int)status); return EXIT_FAILURE; } return 0; } #ifdef __HIP_PLATFORM_HCC__ int cublas_gemm_ex(rocblas_handle handle, rocblas_operation transa, rocblas_operation transb, int m, int n, int k, const float* alpha, const float* beta, const __half* A, const __half* B, __half* C, rocblas_gemm_algo algo) #else int cublas_gemm_ex(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, const float* alpha, const float* beta, const __half* A, const __half* B, __half* C, hipblasGemmAlgo_t algo) #endif { #ifdef __HIP_PLATFORM_HCC__ rocblas_status status = rocblas_gemm_ex(handle, transa, transb, m, n, k, (const void*)alpha, (const void*)A, rocblas_datatype_f16_r, (transa == rocblas_operation_none) ? m : k, (const void*)B, rocblas_datatype_f16_r, (transb == rocblas_operation_none) ? k : n, (const void*)beta, (void*)C, rocblas_datatype_f16_r, m, (void*)C, rocblas_datatype_f16_r, m, rocblas_datatype_f32_r, algo, 0, 0); #else hipblasStatus_t status = hipblasGemmEx(handle, transa, transb, m, n, k, (const void*)alpha, (const void*)A, HIP_R_16F, (transa == HIPBLAS_OP_N) ? m : k, (const void*)B, HIP_R_16F, (transb == HIPBLAS_OP_N) ? k : n, (const void*)beta, (void*)C, HIP_R_16F, m, HIP_R_32F, algo); #endif #ifdef __HIP_PLATFORM_HCC__ if (status != rocblas_status_success) { #else if (status != HIPBLAS_STATUS_SUCCESS) { #endif fprintf(stderr, "!!!! kernel execution error. 
(m: %d, n: %d, k: %d, error: %d) \n", m, n, k, (int)status); return EXIT_FAILURE; } return 0; } #ifdef __HIP_PLATFORM_HCC__ int cublas_strided_batched_gemm(rocblas_handle handle, int m, int n, int k, const float* alpha, const float* beta, const float* A, const float* B, float* C, rocblas_operation op_A, rocblas_operation op_B, int stride_A, int stride_B, int stride_C, int batch, rocblas_gemm_algo algo) #else int cublas_strided_batched_gemm(hipblasHandle_t handle, int m, int n, int k, const float* alpha, const float* beta, const float* A, const float* B, float* C, hipblasOperation_t op_A, hipblasOperation_t op_B, int stride_A, int stride_B, int stride_C, int batch, hipblasGemmAlgo_t algo) #endif { #ifdef __HIP_PLATFORM_HCC__ rocblas_status status = rocblas_gemm_strided_batched_ex(handle, op_A, op_B, m, n, k, alpha, A, rocblas_datatype_f32_r, (op_A == rocblas_operation_none) ? m : k, stride_A, B, rocblas_datatype_f32_r, (op_B == rocblas_operation_none) ? k : n, stride_B, beta, C, rocblas_datatype_f32_r, m, stride_C, C, rocblas_datatype_f32_r, m, stride_C, batch, rocblas_datatype_f32_r, algo, 0, 0); #else hipblasStatus_t status = hipblasGemmStridedBatchedEx(handle, op_A, op_B, m, n, k, alpha, A, HIP_R_32F, (op_A == HIPBLAS_OP_N) ? m : k, stride_A, B, HIP_R_32F, (op_B == HIPBLAS_OP_N) ? k : n, stride_B, beta, C, HIP_R_32F, m, stride_C, batch, HIP_R_32F, algo); #endif #ifdef __HIP_PLATFORM_HCC__ if (status != rocblas_status_success) { #else if (status != HIPBLAS_STATUS_SUCCESS) { #endif fprintf(stderr, "!!!! kernel execution error. (batch: %d, m: %d, n: %d, k: %d, error: %d) \n", batch, m, n, k, (int)status); return EXIT_FAILURE; } return 0; } #ifdef __HIP_PLATFORM_HCC__ int cublas_strided_batched_gemm(rocblas_handle handle, int m, int n, int k, const float* alpha, const float* beta, const __half* A, const __half* B, __half* C, rocblas_operation op_A, rocblas_operation op_B, int stride_A, int stride_B, int stride_C, int batch, rocblas_gemm_algo algo) #else int cublas_strided_batched_gemm(hipblasHandle_t handle, int m, int n, int k, const float* alpha, const float* beta, const __half* A, const __half* B, __half* C, hipblasOperation_t op_A, hipblasOperation_t op_B, int stride_A, int stride_B, int stride_C, int batch, hipblasGemmAlgo_t algo) #endif { #ifdef __HIP_PLATFORM_HCC__ rocblas_status status = rocblas_gemm_strided_batched_ex(handle, op_A, op_B, m, n, k, alpha, A, rocblas_datatype_f16_r, (op_A == rocblas_operation_none) ? m : k, stride_A, B, rocblas_datatype_f16_r, (op_B == rocblas_operation_none) ? k : n, stride_B, beta, C, rocblas_datatype_f16_r, m, stride_C, C, rocblas_datatype_f16_r, m, stride_C, batch, rocblas_datatype_f32_r, algo, 0, 0); #else hipblasStatus_t status = hipblasGemmStridedBatchedEx(handle, op_A, op_B, m, n, k, alpha, A, HIP_R_16F, (op_A == HIPBLAS_OP_N) ? m : k, stride_A, B, HIP_R_16F, (op_B == HIPBLAS_OP_N) ? k : n, stride_B, beta, C, HIP_R_16F, m, stride_C, batch, HIP_R_32F, algo); #endif #ifdef __HIP_PLATFORM_HCC__ if (status != rocblas_status_success) { #else if (status != HIPBLAS_STATUS_SUCCESS) { #endif fprintf(stderr, "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", m, n, k, (int)status); return EXIT_FAILURE; } return 0; }
5bd4e7e5a239a909061d0b7682bbab9c1933705e.cu
// Copyright (c) Microsoft Corporation. // SPDX-License-Identifier: Apache-2.0 // DeepSpeed Team #include "cublas_wrappers.h" #ifdef __HIP_PLATFORM_HCC__ int cublas_gemm_ex(rocblas_handle handle, rocblas_operation transa, rocblas_operation transb, int m, int n, int k, const float* alpha, const float* beta, const float* A, const float* B, float* C, rocblas_gemm_algo algo) #else int cublas_gemm_ex(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const float* alpha, const float* beta, const float* A, const float* B, float* C, cublasGemmAlgo_t algo) #endif { #ifdef __HIP_PLATFORM_HCC__ rocblas_status status = rocblas_gemm_ex(handle, transa, transb, m, n, k, (const void*)alpha, (const void*)A, rocblas_datatype_f32_r, (transa == rocblas_operation_none) ? m : k, (const void*)B, rocblas_datatype_f32_r, (transb == rocblas_operation_none) ? k : n, (const void*)beta, C, rocblas_datatype_f32_r, m, C, rocblas_datatype_f32_r, m, rocblas_datatype_f32_r, algo, 0, 0); #else cublasStatus_t status = cublasGemmEx(handle, transa, transb, m, n, k, (const void*)alpha, (const void*)A, CUDA_R_32F, (transa == CUBLAS_OP_N) ? m : k, (const void*)B, CUDA_R_32F, (transb == CUBLAS_OP_N) ? k : n, (const void*)beta, C, CUDA_R_32F, m, CUDA_R_32F, algo); #endif #ifdef __HIP_PLATFORM_HCC__ if (status != rocblas_status_success) { #else if (status != CUBLAS_STATUS_SUCCESS) { #endif fprintf(stderr, "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", m, n, k, (int)status); return EXIT_FAILURE; } return 0; } #ifdef __HIP_PLATFORM_HCC__ int cublas_gemm_ex(rocblas_handle handle, rocblas_operation transa, rocblas_operation transb, int m, int n, int k, const float* alpha, const float* beta, const __half* A, const __half* B, __half* C, rocblas_gemm_algo algo) #else int cublas_gemm_ex(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const float* alpha, const float* beta, const __half* A, const __half* B, __half* C, cublasGemmAlgo_t algo) #endif { #ifdef __HIP_PLATFORM_HCC__ rocblas_status status = rocblas_gemm_ex(handle, transa, transb, m, n, k, (const void*)alpha, (const void*)A, rocblas_datatype_f16_r, (transa == rocblas_operation_none) ? m : k, (const void*)B, rocblas_datatype_f16_r, (transb == rocblas_operation_none) ? k : n, (const void*)beta, (void*)C, rocblas_datatype_f16_r, m, (void*)C, rocblas_datatype_f16_r, m, rocblas_datatype_f32_r, algo, 0, 0); #else cublasStatus_t status = cublasGemmEx(handle, transa, transb, m, n, k, (const void*)alpha, (const void*)A, CUDA_R_16F, (transa == CUBLAS_OP_N) ? m : k, (const void*)B, CUDA_R_16F, (transb == CUBLAS_OP_N) ? k : n, (const void*)beta, (void*)C, CUDA_R_16F, m, CUDA_R_32F, algo); #endif #ifdef __HIP_PLATFORM_HCC__ if (status != rocblas_status_success) { #else if (status != CUBLAS_STATUS_SUCCESS) { #endif fprintf(stderr, "!!!! kernel execution error. 
(m: %d, n: %d, k: %d, error: %d) \n", m, n, k, (int)status); return EXIT_FAILURE; } return 0; } #ifdef __HIP_PLATFORM_HCC__ int cublas_strided_batched_gemm(rocblas_handle handle, int m, int n, int k, const float* alpha, const float* beta, const float* A, const float* B, float* C, rocblas_operation op_A, rocblas_operation op_B, int stride_A, int stride_B, int stride_C, int batch, rocblas_gemm_algo algo) #else int cublas_strided_batched_gemm(cublasHandle_t handle, int m, int n, int k, const float* alpha, const float* beta, const float* A, const float* B, float* C, cublasOperation_t op_A, cublasOperation_t op_B, int stride_A, int stride_B, int stride_C, int batch, cublasGemmAlgo_t algo) #endif { #ifdef __HIP_PLATFORM_HCC__ rocblas_status status = rocblas_gemm_strided_batched_ex(handle, op_A, op_B, m, n, k, alpha, A, rocblas_datatype_f32_r, (op_A == rocblas_operation_none) ? m : k, stride_A, B, rocblas_datatype_f32_r, (op_B == rocblas_operation_none) ? k : n, stride_B, beta, C, rocblas_datatype_f32_r, m, stride_C, C, rocblas_datatype_f32_r, m, stride_C, batch, rocblas_datatype_f32_r, algo, 0, 0); #else cublasStatus_t status = cublasGemmStridedBatchedEx(handle, op_A, op_B, m, n, k, alpha, A, CUDA_R_32F, (op_A == CUBLAS_OP_N) ? m : k, stride_A, B, CUDA_R_32F, (op_B == CUBLAS_OP_N) ? k : n, stride_B, beta, C, CUDA_R_32F, m, stride_C, batch, CUDA_R_32F, algo); #endif #ifdef __HIP_PLATFORM_HCC__ if (status != rocblas_status_success) { #else if (status != CUBLAS_STATUS_SUCCESS) { #endif fprintf(stderr, "!!!! kernel execution error. (batch: %d, m: %d, n: %d, k: %d, error: %d) \n", batch, m, n, k, (int)status); return EXIT_FAILURE; } return 0; } #ifdef __HIP_PLATFORM_HCC__ int cublas_strided_batched_gemm(rocblas_handle handle, int m, int n, int k, const float* alpha, const float* beta, const __half* A, const __half* B, __half* C, rocblas_operation op_A, rocblas_operation op_B, int stride_A, int stride_B, int stride_C, int batch, rocblas_gemm_algo algo) #else int cublas_strided_batched_gemm(cublasHandle_t handle, int m, int n, int k, const float* alpha, const float* beta, const __half* A, const __half* B, __half* C, cublasOperation_t op_A, cublasOperation_t op_B, int stride_A, int stride_B, int stride_C, int batch, cublasGemmAlgo_t algo) #endif { #ifdef __HIP_PLATFORM_HCC__ rocblas_status status = rocblas_gemm_strided_batched_ex(handle, op_A, op_B, m, n, k, alpha, A, rocblas_datatype_f16_r, (op_A == rocblas_operation_none) ? m : k, stride_A, B, rocblas_datatype_f16_r, (op_B == rocblas_operation_none) ? k : n, stride_B, beta, C, rocblas_datatype_f16_r, m, stride_C, C, rocblas_datatype_f16_r, m, stride_C, batch, rocblas_datatype_f32_r, algo, 0, 0); #else cublasStatus_t status = cublasGemmStridedBatchedEx(handle, op_A, op_B, m, n, k, alpha, A, CUDA_R_16F, (op_A == CUBLAS_OP_N) ? m : k, stride_A, B, CUDA_R_16F, (op_B == CUBLAS_OP_N) ? k : n, stride_B, beta, C, CUDA_R_16F, m, stride_C, batch, CUDA_R_32F, algo); #endif #ifdef __HIP_PLATFORM_HCC__ if (status != rocblas_status_success) { #else if (status != CUBLAS_STATUS_SUCCESS) { #endif fprintf(stderr, "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", m, n, k, (int)status); return EXIT_FAILURE; } return 0; }
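The wrappers above forward to cublasGemmEx / cublasGemmStridedBatchedEx with column-major operands and leading dimension m (or k when the operand is transposed). A minimal direct call for FP32 is sketched below with made-up 2x2 data; it uses the cublasComputeType_t form of the compute-type argument (CUBLAS_COMPUTE_32F) available in cuBLAS 11 and later, and is not DeepSpeed code.

// Illustrative sketch only; sizes and data are assumptions.
#include <cstdio>
#include <cublas_v2.h>
#include <cuda_runtime.h>

int main()
{
    const int m = 2, n = 2, k = 2;
    const float alpha = 1.0f, beta = 0.0f;
    // Column-major data: A = identity, B = [[1,3],[2,4]] stored column by column.
    const float hA[] = {1, 0, 0, 1};
    const float hB[] = {1, 2, 3, 4};
    float *A, *B, *C;
    cudaMallocManaged(&A, sizeof(hA));
    cudaMallocManaged(&B, sizeof(hB));
    cudaMallocManaged(&C, m * n * sizeof(float));
    for (int i = 0; i < 4; ++i) { A[i] = hA[i]; B[i] = hB[i]; }

    cublasHandle_t handle;
    cublasCreate(&handle);
    cublasStatus_t status = cublasGemmEx(
        handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
        &alpha, A, CUDA_R_32F, m,      // lda = m when A is not transposed
                B, CUDA_R_32F, k,      // ldb = k when B is not transposed
        &beta,  C, CUDA_R_32F, m,      // ldc = m
        CUBLAS_COMPUTE_32F, CUBLAS_GEMM_DEFAULT);
    cudaDeviceSynchronize();

    if (status != CUBLAS_STATUS_SUCCESS)
        printf("cublasGemmEx failed: %d\n", (int)status);
    else
        printf("C = [%g %g; %g %g]\n", C[0], C[2], C[1], C[3]);   // expect B back

    cublasDestroy(handle);
    cudaFree(A); cudaFree(B); cudaFree(C);
    return 0;
}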
d3e6d7cebda99d8c3ca9831606c6859b893549ef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ##################################################################### */ /* */ /* Notice: COPYRIGHT (C) GPU,GROUP. 2010 */ /* THIS PROGRAM IS PROVIDED UNDER THE TERMS OF GPU GROUP */ /* THE PROGRAM MAY ONLY */ /* BE USED IN A MANNER EXPLICITLY SPECIFIED IN THE GPU, */ /* WHICH INCLUDES LIMITATIONS ON COPYING, MODIFYING, */ /* REDISTRIBUTION AND WARANTIES. UNAUTHORIZED USE OF THIS */ /* PROGRAM IS SCTRICTLY PROHIBITED. */ /* ##################################################################### */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <cutil_inline.h> #include <cutil_inline_runtime.h> #include <cutil.h> #include "../inc/me_common.h" #include "../inc/me_context.h" #include "../inc/residual_coding.h" #include "../inc/encoder_tables.h" #include "../inc/cavlc_data.h" #include "../inc/cavlc.h" #include "../inc/deblock.h" #include "../inc/h264_common.h" #include "../inc/const_defines.h" #include "../inc/entropy_data.h" #include "../inc/encoder_context.h" #include "../inc/output.h" //include projec #include "me_LR_search_kernel.cu" #include "me_refinement_kernel.cu" #include "iframe_residual_coding_kernel.cu" #include "iframe_residual_chroma_kernel.cu" #include "pframe_inter_residual_coding_kernel.cu" #include "calc_cbp_and_total_coef.cu" #include "cavlc_block_context_kernel.cu" #include "cavlc_texture_symbols_kernel.cu" #include "cavlc_texture_codes_kernel.cu" #include "cavlc_header_code_kernel.cu" #include "cavlc_bit_pack_kernel.cu" #include "deblock_kernel.cu" ///kernel #include "intra_coding_kernel.cu" #include "intra_coding_kernel_chroma.cu" #include "pframe_LR_serach_kernel.cu" // void encode_cuda(encoder_context_t *p_enc ) { clock_t start,end,start1,end1; int enc_width,enc_height,enc_width_c,enc_height_c; int width_ref,height_ref,width_ref_c,height_ref_c; int num_mb_ver, num_mb_hor; int num_mbs; int i, j; int frame_sad_sum; S_QP_DATA QpData; short *pQuantTable; short *pDQuantTable; S_QP_DATA QpData_chroma; short *pQuantTable_chroma; short *pDQuantTable_chroma; start1 = clock(); unsigned char *p_input = p_enc->input_frame.y; unsigned char *p_recon = p_enc->pRecFrame->y; short *p_dct_coefs = p_enc->transform_coefs.pDctCoefs; short *p_dc_coefs = p_enc->transform_coefs.pDcCoefs; int QP = p_enc->frame_info.frame_qp; S_BLK_MB_INFO *p_blk_mb_info = p_enc->pBlkMBInfo; int *p_mb_qps = p_enc->p_mb_qps; int constrained_intra = p_enc->PictureParameterSet.constrained_intra_pred_flag; int intra_pred_select = p_enc->intra_prediction_selection; unsigned char *p_input_u = p_enc->input_frame.u; unsigned char *p_input_v = p_enc->input_frame.v; unsigned char *p_recon_u = p_enc->pRecFrame->u; unsigned char *p_recon_v = p_enc->pRecFrame->v; short *p_dct_coefs_u = p_enc->transform_coefs.pDctCoefs_u; short *p_dct_coefs_v = p_enc->transform_coefs.pDctCoefs_v; short *p_dc_coefs_u = p_enc->transform_coefs.pDcCoefs_u; short *p_dc_coefs_v = p_enc->transform_coefs.pDcCoefs_v; unsigned char *p_rec_ptr; unsigned char *p_rec_u_ptr, *p_rec_v_ptr; enc_width = p_enc->width; enc_height = p_enc->height; enc_width_c = p_enc->width/2; enc_height_c = p_enc->height/2; width_ref = enc_width + 2*REFERENCE_FRAME_PAD_AMT; height_ref = enc_height + 2*REFERENCE_FRAME_PAD_AMT; width_ref_c = width_ref>>1; height_ref_c = height_ref>>1; num_mb_hor = enc_width / MB_WIDTH; num_mb_ver = enc_height / MB_HEIGHT; num_mbs = num_mb_hor*num_mb_ver; p_rec_ptr = p_recon + RECON_FRAME_Y_OFFSET* width_ref + 
RECON_FRAME_X_OFFSET; p_rec_u_ptr = p_recon_u + RECON_FRAME_Y_OFFSET_C * width_ref_c + RECON_FRAME_X_OFFSET_C; p_rec_v_ptr = p_recon_v + RECON_FRAME_Y_OFFSET_C * width_ref_c + RECON_FRAME_X_OFFSET_C; unsigned char *dev_input; unsigned char *dev_recon; S_BLK_MB_INFO *dev_blk_mb_info; S_QP_DATA *dev_QpData; short *dev_dct_coefs; short *dev_dc_coefs; short *dev_Quant_tab; short *dev_Dquant_tab; unsigned char *dev_input_uv; unsigned char *dev_recon_uv; S_QP_DATA *dev_QpData_uv; short *dev_dct_coefs_uv; short *dev_dc_coefs_uv; short *Quant_tab_uv; short *Dquant_tab_uv; int *dev_ZigZag; cutilSafeCall(hipMalloc((void**) &dev_QpData,sizeof(S_QP_DATA))); cutilSafeCall(hipMalloc((void**) &dev_QpData_uv,sizeof(S_QP_DATA))); // cutilSafeCall(hipMalloc((void**) &Quant_tab_uv,BLOCKS_PER_MB*sizeof(short))); cutilSafeCall(hipMalloc((void**) &Dquant_tab_uv,BLOCKS_PER_MB*sizeof(short))); cutilSafeCall(hipMalloc((void**) &dev_ZigZag,BLOCKS_PER_MB*sizeof(int))); cutilSafeCall(hipMemcpy(dev_ZigZag,ZigZagScan,16*sizeof(int),hipMemcpyHostToDevice)); cutilSafeCall(hipMalloc((void**) &dev_input,MB_WIDTH*MB_HEIGHT*num_mbs)); // cutilSafeCall(hipMalloc((void**) &dev_recon,height_ref*width_ref*sizeof(char))); //(1088*1952B) cutilSafeCall(hipMalloc((void**) &dev_recon_uv,height_ref_c*width_ref_c*2)); // cutilSafeCall(hipMalloc((void**) &dev_input_uv,MB_TOTAL_SIZE_C*num_mbs*2)); // cutilSafeCall(hipMalloc((void**) &dev_blk_mb_info,BLOCKS_PER_MB*sizeof(S_BLK_MB_INFO)*num_mbs)); cutilSafeCall(hipMalloc((void**) &dev_dct_coefs,MB_TOTAL_SIZE*num_mbs*sizeof(short))); // cutilSafeCall(hipMalloc((void**) &dev_dc_coefs,BLOCKS_PER_MB*num_mbs*sizeof(short))); // cutilSafeCall(hipMalloc((void**) &dev_dct_coefs_uv, MB_TOTAL_SIZE_C*num_mbs*2*sizeof(short))); cutilSafeCall(hipMalloc((void**) &dev_dc_coefs_uv, BLOCKS_PER_MB_C*num_mbs*2*sizeof(short))); // cutilSafeCall(hipMalloc((void**) &dev_Quant_tab,BLOCKS_PER_MB*sizeof(short))); // cutilSafeCall(hipMalloc((void**) &dev_Dquant_tab,BLOCKS_PER_MB*sizeof(short))); // end1 = clock(); p_enc->new_timers.prep_encode_frame += (end1 - start1); if(p_enc->slice_params.slice_type == SLICE_I) { start = clock(); for (i = 0; i < num_mbs * BLOCKS_PER_MB; i++) { p_blk_mb_info[i].QP = QP; } InitQPDataAndTablesFromQP(&QpData, &pQuantTable, &pDQuantTable, QP, 1, 1); InitQPDataAndTablesFromQP(&QpData_chroma, &pQuantTable_chroma, &pDQuantTable_chroma, p_blk_mb_info->QP, 1, 0); cutilSafeCall(hipMemcpy(dev_QpData,&QpData,sizeof(S_QP_DATA),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(dev_Quant_tab,pQuantTable,BLOCKS_PER_MB*sizeof(short),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(dev_Dquant_tab,pDQuantTable,BLOCKS_PER_MB*sizeof(short),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(dev_input,p_input,MB_TOTAL_SIZE*num_mbs,hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(dev_blk_mb_info,p_blk_mb_info,BLOCKS_PER_MB*sizeof(S_BLK_MB_INFO)*num_mbs,hipMemcpyHostToDevice)); dim3 grid(p_enc->i_slice_num, 1, 1); //grid dim3 threads(4, 4, 1); //blockthreads,threads16. 
dim3 block(4, 4, 16); hipLaunchKernelGGL(( iframe_luma_residual_coding) , dim3(grid),dim3(block), 0, 0, dev_input, enc_width, num_mb_hor, num_mb_ver, dev_recon + RECON_FRAME_Y_OFFSET* width_ref + RECON_FRAME_X_OFFSET, width_ref, dev_dct_coefs, dev_dc_coefs, dev_blk_mb_info, dev_Quant_tab, dev_Dquant_tab, dev_QpData, constrained_intra, intra_pred_select, p_enc->i_slice_num ); cutilSafeCall(hipMemcpy(dev_input_uv,p_input_u,MB_TOTAL_SIZE_C*num_mbs,hipMemcpyHostToDevice)); //U cutilSafeCall(hipMemcpy(dev_input_uv+MB_TOTAL_SIZE_C*num_mbs,p_input_v,MB_TOTAL_SIZE_C*num_mbs,hipMemcpyHostToDevice));//V cutilSafeCall(hipMemcpy(dev_QpData_uv,&QpData_chroma,sizeof(S_QP_DATA),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(Quant_tab_uv,pQuantTable_chroma,BLOCKS_PER_MB*sizeof(short),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(Dquant_tab_uv,pDQuantTable_chroma,BLOCKS_PER_MB*sizeof(short),hipMemcpyHostToDevice)); dim3 grid_chroma(p_enc->i_slice_num, 1, 1); dim3 threads_chroma(2, 4, 1); dim3 block_chroma(4, 2, 16); hipLaunchKernelGGL(( iframe_residual_coding_chroam), dim3(grid_chroma),dim3(block_chroma), 0, 0, dev_input_uv, dev_blk_mb_info, dev_QpData_uv, Quant_tab_uv, Dquant_tab_uv, dev_recon_uv+RECON_FRAME_Y_OFFSET_C * width_ref_c + RECON_FRAME_X_OFFSET_C, dev_dct_coefs_uv, dev_dc_coefs_uv, enc_width_c, enc_height_c, width_ref_c, height_ref_c, num_mb_hor, num_mb_ver, p_enc->i_slice_num ); dim3 grid_cbp_luma(num_mb_hor/8, num_mb_ver, 1); dim3 threads_cbp_luma(BLK_WIDTH, BLK_HEIGHT, 8); hipLaunchKernelGGL(( CalcCBP_and_TotalCoeff_Luma_cuda), dim3(grid_cbp_luma),dim3(threads_cbp_luma), 0, 0, dev_dct_coefs, dev_blk_mb_info); dim3 grid_cbp_chroma(num_mb_hor/8, num_mb_ver, 1); dim3 threads_cbp_chroma(BLOCKS_PER_MB_C, 8, 2); hipLaunchKernelGGL(( CalcCBP_and_TotalCoeff_Chroma_cuda), dim3(grid_cbp_chroma),dim3(threads_cbp_chroma), 0, 0, dev_dct_coefs_uv, dev_dc_coefs_uv, dev_blk_mb_info, enc_width_c, enc_height_c); cutilSafeCall(hipMemcpy(p_blk_mb_info,dev_blk_mb_info,BLOCKS_PER_MB*num_mbs*sizeof(S_BLK_MB_INFO),hipMemcpyDeviceToHost)); frame_sad_sum = 0; for(j = 0; j < num_mb_ver; j++) { for(i = 0; i < num_mb_hor; i++) { frame_sad_sum += (p_enc->pBlkMBInfo + (j * num_mb_hor + i) * BLOCKS_PER_MB)->MinSAD; } } p_enc->avg_mb_sad = frame_sad_sum / (num_mb_hor * num_mb_ver); p_enc->frame_info.num_intra_mb = num_mb_hor * num_mb_ver; end = clock(); p_enc->new_timers.iframe_residual += (end-start); } else { start = clock(); int num_mb_hor_ref,num_mb_ver_ref; int RefStride2Begin; int RefStride2BeginUV; int decimate_ratio = 2; int M = decimate_ratio; RC_CONTEXT *p_rc; ME_CONTEXT *p_me; CUVME_MV_RESULTS *p_me_mv_results; CUVME_MB_INFO *p_me_mb_info; CUVME_Y_BUFFER me_src; CUVME_Y_BUFFER *ptr_me_src; CUVME_MB_CHARAC *p_me_mb_characs; ME_CONTEXT *p_sc_me; ME_context_t *p_ME_context; int do_zero_search = 0; int do_low_res = 0; int do_int_search = 0; int do_int_and_halfpel_search = 0; int do_decimation_for_low_res = 0; unsigned char *i_InputLuma; SINT32 skipbias_factor; int avg_var; CUVME_Y_BUFFER me_ref; CUVME_Y_BUFFER me_pred; unsigned int AvgMbSAD = p_enc->avg_mb_sad; unsigned char *dev_input_ref; unsigned char *dev_out_HR_ref; unsigned char *dev_out_QR_ref; unsigned char *dev_input_src; unsigned char *dev_out_HR_src; unsigned char *dev_out_QR_src; CUVME_MV_RESULTS *dev_mvsLocal; CUVME_MB_INFO *dev_mb_info; unsigned int *IntegerPelCenterVecs; unsigned char *dev_out_pred; CUVME_MV_RESULTS *integer_mvmap; CUVME_MB_INFO *mb_info; int *dev_CoeffCosts; unsigned int lambda_factor_rc; lambda_factor_rc = 
QP2QUANT_NEW[p_enc->frame_info.frame_qp]; if(p_enc->intra_mb_level == 80) lambda_factor_rc <<= 8; else if(p_enc->intra_mb_level == 90) lambda_factor_rc <<= 7; else if(p_enc->intra_mb_level == 100) lambda_factor_rc <<= 5; else lambda_factor_rc <<= p_enc->intra_mb_level; const int ZigZag[16] = {0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15}; const int CoeffCosts[16] = {3,2,2,1,1,1,0,0,0,0,0,0,0,0,0,0}; S_BLK_MB_INFO *pBlkMBInfo; E_ERR err = ERR_SUCCESS; CUVME_ERROR me_err = CUVME_ERR_SUCCESS; num_mb_hor_ref = width_ref/MB_WIDTH; num_mb_ver_ref = height_ref/MB_HEIGHT; RefStride2Begin = ((REFERENCE_FRAME_PAD_AMT * width_ref) + REFERENCE_FRAME_PAD_AMT); RefStride2BeginUV = RECON_FRAME_Y_OFFSET_C * width_ref_c + RECON_FRAME_X_OFFSET_C;/*((REFERENCE_FRAME_PAD_AMT/2 * p_enc->padded_ref_frame.width/2) + REFERENCE_FRAME_PAD_AMT/2);*/ p_me = &p_enc->me_context; p_rc = &p_enc->rc_context; p_me_mv_results = p_me->p_me_mv_results; p_me_mb_info = p_me->p_me_mb_info; p_me_mb_characs = p_me->p_me_mb_characs; err = ERR_SUCCESS; me_err = CUVME_ERR_SUCCESS; ptr_me_src = &me_src; ptr_me_src->y = p_enc->input_frame.y; ptr_me_src->buffer_width = p_enc->input_frame.width; ptr_me_src->buffer_height = p_enc->input_frame.height; ptr_me_src->active_width = p_enc->width; ptr_me_src->active_height = p_enc->height; ptr_me_src->offset_x = 0; ptr_me_src->offset_y = 0; me_pred.y = p_enc->inter_pred_frame.y; me_pred.buffer_width = p_enc->inter_pred_frame.width; me_pred.buffer_height = p_enc->inter_pred_frame.height; me_pred.active_width = p_enc->width; me_pred.active_height = p_enc->height; me_pred.offset_x = 0; me_pred.offset_y = 0; me_ref.y = p_enc->pRefFrame->y; me_ref.buffer_width = p_enc->pRefFrame->width; me_ref.buffer_height = p_enc->pRefFrame->height; me_ref.active_width = p_enc->width; me_ref.active_height = p_enc->height; me_ref.offset_x = REFERENCE_FRAME_PAD_AMT; me_ref.offset_y = REFERENCE_FRAME_PAD_AMT; // Transform and reconstruct InitQPDataAndTablesFromQP(&QpData_chroma, &pQuantTable_chroma, &pDQuantTable_chroma, p_enc->frame_info.frame_qp, 0, 0); InitQPDataAndTablesFromQP (&QpData, &pQuantTable, &pDQuantTable, p_enc->frame_info.frame_qp, 0, 1); p_ME_context = (ME_context_t *)p_me->me_handle; SINT32 lambda_factor; /********** decide between forward or backward reference frame to be used *******************/ // Setting Lambda //TODO: confirm this lambda_factor = QP2QUANT_MELIB[p_ME_context->FrameQP - 12]; skipbias_factor = 1; if(p_ME_context->FrameQP > 42) { skipbias_factor = 2; } me_err = cuvme_set_reference_frame(p_me->me_handle, &me_ref, 0, 0); if (!me_err) { me_err = cuvme_set_predicted_picture(p_me->me_handle, &me_pred); } if (!me_err) { if((p_enc->mb_adapt_qp_on == 1) || (p_enc->intra_mb_level == 0)) { me_err = cuvme_set_return_mb_characteristics(p_me->me_handle, p_me_mb_characs); } } //ME if(!me_err) { p_ME_context->ptr_src_picture = ptr_me_src; if(p_ME_context->nonpersistent_mem_givenby_app_flag) { me_assign_nonpersistentmem_pointers(p_ME_context); } if(p_ME_context->num_mvs == 1) { p_ME_context->ptr_mvs[0] = p_me_mv_results; } else { p_ME_context->ptr_mvs[0] = p_ME_context->ptr_mvs_local[0]; p_ME_context->ptr_res_out[0] = p_me_mv_results; } p_ME_context->ptr_mb_info[0] = p_me_mb_info; // We need to set all the MVs to zero only for zero mode if((p_ME_context->me_mode < 5) || (p_ME_context->CRef)) { unsigned int NumMbs = (p_ME_context->width / MB_WIDTH) * (p_ME_context->height / MB_HEIGHT); memset(p_ME_context->ptr_mvs[0], 0, NumMbs*sizeof(CUVME_MV_RESULTS)); memset(p_ME_context->ptr_mb_info[0], 
0, NumMbs*sizeof(CUVME_MB_INFO)); } if(!p_ME_context->num_lowres_forw_references) { p_ME_context->forw_quarter_res_ref[0] = p_ME_context->malloced_forw_quarter_res_ref[0]; p_ME_context->forw_half_res_ref[0] = p_ME_context->malloced_forw_half_res_ref[0]; } if(!p_ME_context->num_lowres_back_references) { p_ME_context->back_quarter_res_ref[0] = p_ME_context->malloced_back_quarter_res_ref[0]; p_ME_context->back_half_res_ref[0] = p_ME_context->malloced_back_half_res_ref[0]; } if(p_ME_context->source_format == RASTER_ORDER) { { me_ConvertRasterToBlockFlattened(ptr_me_src->y, p_ME_context->block_flat_src->y, p_ME_context->width,p_ME_context->height); } p_ME_context->block_flat_src->buffer_width = ptr_me_src->buffer_width; p_ME_context->block_flat_src->buffer_height = ptr_me_src->buffer_height; p_ME_context->block_flat_src->active_width = ptr_me_src->active_width; p_ME_context->block_flat_src->active_height = ptr_me_src->active_height; p_ME_context->block_flat_src->offset_x = ptr_me_src->offset_x; p_ME_context->block_flat_src->offset_y = ptr_me_src->offset_y; p_ME_context->ptr_src_picture = p_ME_context->block_flat_src; } p_ME_context->do_zero_search = 0; p_ME_context->do_low_res = 0; p_ME_context->do_int_search = 0; p_ME_context->do_int_and_halfpel_search = 0; p_ME_context->do_decimation_for_low_res = 0; switch(p_ME_context->me_mode) { case 0: p_ME_context->do_zero_search = 1; do_zero_search = 1; break; case 5: case 10: p_ME_context->do_low_res = 1; do_low_res = 1; break; case 20: case 22: p_ME_context->do_low_res = 1; do_low_res = 1; p_ME_context->do_int_search = 1; do_int_search = 1; break; case 30: case 32: p_ME_context->do_low_res = 1; do_low_res = 1; p_ME_context->do_int_and_halfpel_search = 1; do_int_and_halfpel_search = 1; break; default: break; } if(!p_ME_context->num_lowres_forw_references) { p_ME_context->do_decimation_for_low_res = 1; do_decimation_for_low_res = 1; } if((p_ME_context->FrameQP - 12) > 0) p_ME_context->lambda_factor = QP2QUANT_MELIB[p_ME_context->FrameQP - 12];// else p_ME_context->lambda_factor = QP2QUANT_MELIB[0];// i_InputLuma = p_ME_context->ptr_src_picture->y; end = clock(); p_enc->new_timers.pframe_total +=(end -start); start = clock(); if(do_decimation_for_low_res) { cutilSafeCall(hipMalloc((void**) &dev_out_HR_ref,(height_ref/M)*(width_ref/M))); //1/21/4 cutilSafeCall(hipMalloc((void**) &dev_out_QR_ref,(height_ref/(2*M))*(width_ref/(2*M)))); cutilSafeCall(hipMalloc((void**) &dev_input_ref,width_ref*height_ref)); // cutilSafeCall(hipMemcpy(dev_input_ref,p_ME_context->ptr_forw_ref_frame[0]->y,width_ref*height_ref,hipMemcpyHostToDevice)); dim3 grid_ref(num_mb_hor_ref>>1,num_mb_ver_ref, 1); //grid dim3 threads_ref((MB_WIDTH*2)>>1, MB_HEIGHT>>1, 1); hipLaunchKernelGGL(( me_Decimate_kernel), dim3(grid_ref),dim3(threads_ref), 0, 0, dev_input_ref,dev_out_HR_ref,dev_out_QR_ref,height_ref,width_ref); } if(do_low_res) { cutilSafeCall(hipMemcpy(dev_input,p_enc->input_frame.y,MB_TOTAL_SIZE*num_mbs*sizeof(unsigned char),hipMemcpyHostToDevice)); cutilSafeCall(hipMalloc((void**) &dev_out_HR_src,(enc_width/M)*(enc_height/M))); //1/21/4 cutilSafeCall(hipMalloc((void**) &dev_out_QR_src,(enc_width/(2*M))*(enc_height/(2*M)))); cutilSafeCall(hipMalloc((void**) &dev_mvsLocal,num_mb_hor*num_mb_ver*sizeof(CUVME_MV_RESULTS))); cutilSafeCall(hipMalloc((void**) &dev_mb_info,num_mbs*sizeof(CUVME_MB_INFO))); // cutilSafeCall(hipMemcpy(dev_blk_mb_info,p_enc->pBlkMBInfo,BLOCKS_PER_MB*sizeof(S_BLK_MB_INFO)*num_mbs,hipMemcpyHostToDevice)); cutilSafeCall(hipMalloc((void**) 
&dev_input_src,enc_width*enc_height*sizeof(char))); // cutilSafeCall(hipMemcpy(dev_input_src,i_InputLuma,enc_width*enc_height*sizeof(char),hipMemcpyHostToDevice)); unsigned int *HR_SAD_dev; cutilSafeCall(hipMalloc((void**) &HR_SAD_dev,num_mb_hor*num_mb_ver*32*sizeof(unsigned int))); dim3 grid_src(num_mb_hor>>1,num_mb_ver, 1); //grid dim3 threads_src((MB_WIDTH*2)>>1, MB_HEIGHT>>1, 1); dim3 grid_QR(num_mb_hor/6,num_mb_ver/4,1); dim3 threads_QR(16,16,1); dim3 grid_QR_new(num_mb_hor/6,(num_mb_ver+2)/3,1); dim3 grid_HR_SAD(num_mb_hor,num_mb_ver,1); dim3 threads_HR_SAD(8,4,4); dim3 grid_HR(num_mb_hor/6,num_mb_ver/4,1); dim3 threads_HR(8,4,1); hipLaunchKernelGGL(( me_Decimate_kernel), dim3(grid_src),dim3(threads_src), 0, 0, dev_input,dev_out_HR_src,dev_out_QR_src,enc_height,enc_width); hipLaunchKernelGGL(( me_QR_LowresSearch), dim3(grid_QR_new),dim3(threads_QR), 0, 0, dev_out_QR_src, dev_out_QR_ref, dev_mvsLocal, enc_width/LOWRES_DEC_RATIO, enc_height/LOWRES_DEC_RATIO, width_ref/LOWRES_DEC_RATIO, height_ref/LOWRES_DEC_RATIO, num_mb_hor, num_mb_ver, 2*QR_WEIGHT, QR_SEARCH_SIZE, QR_ZERO_BIAS, lambda_factor, skipbias_factor ); hipLaunchKernelGGL(( me_HR_Cal_Candidate_SAD_kernel), dim3(grid_HR_SAD),dim3(threads_HR_SAD), 0, 0, dev_out_HR_src, dev_out_HR_ref, dev_mvsLocal, enc_width/HLFRES_DEC_RATIO, enc_height/HLFRES_DEC_RATIO, width_ref/HLFRES_DEC_RATIO, height_ref/HLFRES_DEC_RATIO, num_mb_hor, num_mb_ver, HR_SEARCH_SIZE, HR_SAD_dev ); dim3 grid_HR_new(num_mb_hor/6,(num_mb_ver+2)/3,1); hipLaunchKernelGGL(( me_HR_Candidate_Vote), dim3(grid_HR_new),dim3(threads_HR), 0, 0, HR_SAD_dev, dev_mvsLocal, dev_mb_info, enc_width/HLFRES_DEC_RATIO, enc_height/HLFRES_DEC_RATIO, width_ref/HLFRES_DEC_RATIO, height_ref/HLFRES_DEC_RATIO, num_mb_hor, num_mb_ver, 4*HR_WEIGHT, HR_SEARCH_SIZE, HR_ZERO_BIAS, lambda_factor, (skipbias_factor*3) ); cutilSafeCall(hipFree(HR_SAD_dev)); } if(do_int_and_halfpel_search||do_int_search) { cutilSafeCall(hipMalloc((void**) &IntegerPelCenterVecs,num_mb_hor*num_mb_ver*sizeof(int))); cutilSafeCall(hipMalloc((void**)&dev_out_pred,enc_width*enc_height*sizeof(unsigned char))); cutilSafeCall(hipMalloc((void**)&integer_mvmap,num_mb_hor*num_mb_ver*sizeof (CUVME_MV_RESULTS))); cutilSafeCall(hipMalloc((void**)&mb_info,num_mb_hor*num_mb_ver*sizeof(CUVME_MB_INFO))); dim3 grid_MV(1,num_mb_ver,1); dim3 threads_MV(num_mb_hor,1,1); dim3 grid_Int(num_mb_hor,1,1); dim3 threads_Int(16,3,3); hipLaunchKernelGGL(( me_ClipVec_ForFrame), dim3(grid_MV),dim3(threads_MV), 0, 0, dev_mvsLocal,IntegerPelCenterVecs,p_ME_context->search_range_x, p_ME_context->search_range_y, p_ME_context->candidate_tune_values.integer_clip_range,num_mb_hor,num_mb_ver/*,dev_ref_index,dev_MV*/); // hipLaunchKernelGGL(( me_IntegerSimulsadVote_kernel), dim3(grid_Int),dim3(threads_Int), 0, 0, dev_input_src,dev_input_ref,IntegerPelCenterVecs,integer_mvmap,mb_info,dev_out_pred, num_mb_hor,num_mb_ver,RefStride2Begin,lambda_factor,enc_width,width_ref, dev_blk_mb_info); cutilSafeCall(hipMemcpy(p_ME_context->p_pred_picture->y,dev_out_pred,enc_width*enc_height*sizeof(unsigned char),hipMemcpyDeviceToHost)); } p_ME_context->num_lowres_forw_references = 0; p_ME_context->num_lowres_back_references = 0; p_ME_context->LastAvgHRSAD = p_ME_context->AvgHRSAD; p_ME_context->num_sec_formats = 0; if(err == CUVME_ERR_SUCCESS) p_ME_context->curr_state = INIT; p_ME_context->flag_do_mc = 0; p_ME_context->store_dec_src_flag = 0; p_ME_context->get_mb_characs_flag = 0; p_ME_context->num_forw_references = 0; p_ME_context->num_back_references = 0; 
p_ME_context->ref_frame_distance_fwd = 0; p_ME_context->ref_frame_distance_bwd = 0; } p_sc_me = &p_enc->me_context; pBlkMBInfo = p_enc->pBlkMBInfo; if (!err) { me_err = cuvme_get_avg_var(p_sc_me->me_handle ,&avg_var); if (me_err) { printf ("ME returned error code %d", me_err); err = ERR_FAILURE; } if(!err) err = (E_ERR)cuvrc_set_avg_var(p_rc->rc_handle ,avg_var); if (err) { printf ("RC returned error code %d", err); err = ERR_FAILURE; } } end = clock(); p_enc->new_timers.me_total += (end-start); p_enc->new_timers.pframe_total +=(end -start); start = clock(); //P cutilSafeCall(hipMalloc((void**) &dev_CoeffCosts,BLOCKS_PER_MB*sizeof(int))); cutilSafeCall(hipMemcpy(dev_recon,(p_rec_ptr),enc_height*width_ref*sizeof(char)-RECON_FRAME_X_OFFSET,hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(dev_CoeffCosts,CoeffCosts,sizeof(int)*16,hipMemcpyHostToDevice)); // cutilSafeCall(hipMemcpy(dev_QpData,&QpData,sizeof(S_QP_DATA),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(dev_Quant_tab,pQuantTable,BLOCKS_PER_MB*sizeof(short),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(dev_Dquant_tab,pDQuantTable,BLOCKS_PER_MB*sizeof(short),hipMemcpyHostToDevice)); dim3 grid_inter((num_mb_hor>>2),num_mb_ver,1); //block4block dim3 threads_inter(4,4,4); //block64block // cuda kernel dim3 grid_intra(p_enc->i_slice_num,1,1); dim3 threads_intra(4,4,1); // hipLaunchKernelGGL(( pframe_inter_resudial_coding_luma_kernel), dim3(grid_inter),dim3(threads_inter), 0, 0, dev_input, dev_out_pred, enc_width, dev_recon+RefStride2Begin, width_ref, dev_dct_coefs, dev_Quant_tab, dev_Dquant_tab, dev_QpData, dev_ZigZag, dev_CoeffCosts ); end = clock(); p_enc->new_timers.pframe_residual_inter += (end-start); p_enc->new_timers.pframe_residual_luma += (end-start); p_enc->new_timers.pframe_total += (end -start); start = clock(); // dim3 block_intra(4,4,16); hipLaunchKernelGGL(( pframe_intra_resudial_coding_luma), dim3(grid_intra),dim3(block_intra), 0, 0, dev_input, dev_out_pred, enc_width, dev_recon+RefStride2Begin, width_ref, dev_blk_mb_info, dev_dct_coefs, dev_dc_coefs, dev_Quant_tab, dev_Dquant_tab, dev_QpData, AvgMbSAD, lambda_factor_rc, num_mb_hor, num_mb_ver, p_enc->i_slice_num ); // end = clock(); p_enc->new_timers.pframe_residual_intra += (end-start); p_enc->new_timers.pframe_residual_luma += (end-start); p_enc->new_timers.pframe_total += (end -start); start = clock(); unsigned char *dev_pred_uv; unsigned char *dev_ref_uv; cutilSafeCall(hipMalloc((void**) &dev_ref_uv, (width_ref_c)*(height_ref_c)*2*sizeof(unsigned char))); cutilSafeCall(hipMalloc((void**) &dev_pred_uv, (enc_width_c)*(enc_height_c)*2*sizeof(unsigned char))); cutilSafeCall(hipMemcpy(dev_ref_uv, p_enc->pRefFrame->u,(width_ref_c)*(height_ref_c)*sizeof(unsigned char),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(dev_ref_uv+(width_ref_c)*(height_ref_c), p_enc->pRefFrame->v,(width_ref_c)*(height_ref_c)*sizeof(unsigned char),hipMemcpyHostToDevice)); dim3 grid_mcc(num_mb_hor,num_mb_ver,1); dim3 threads_mcc(8,8,2); hipLaunchKernelGGL(( MotionCompensateChroma_kernel), dim3(grid_mcc),dim3(threads_mcc), 0, 0, dev_ref_uv, dev_pred_uv, dev_blk_mb_info, enc_width_c, enc_height_c, width_ref_c, height_ref_c, RefStride2BeginUV ); cutilSafeCall(hipMemcpy(p_enc->inter_pred_frame.u,dev_pred_uv,(enc_width_c)*(enc_height_c)*sizeof(char),hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(p_enc->inter_pred_frame.v, dev_pred_uv+(enc_width_c)*(enc_height_c),(enc_width_c)*(enc_height_c)*sizeof(char),hipMemcpyDeviceToHost)); end = clock(); p_enc->new_timers.pframe_residual_chroma += 
(end-start); p_enc->new_timers.pframe_mc += (end-start); p_enc->new_timers.pframe_total += (end -start); start = clock(); cutilSafeCall(hipMemcpy(dev_input_uv,p_enc->input_frame.u,(enc_width_c)*(enc_height_c),hipMemcpyHostToDevice)); //U cutilSafeCall(hipMemcpy(dev_input_uv+(enc_width_c)*(enc_height_c),p_enc->input_frame.v,(enc_width_c)*(enc_height_c),hipMemcpyHostToDevice));//V cutilSafeCall(hipMemcpy(dev_QpData_uv,&QpData_chroma,sizeof(S_QP_DATA),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(Quant_tab_uv,pQuantTable_chroma,BLOCKS_PER_MB*sizeof(short),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(Dquant_tab_uv,pDQuantTable_chroma,BLOCKS_PER_MB*sizeof(short),hipMemcpyHostToDevice)); dim3 grid_intre_c((num_mb_hor>>1),num_mb_ver,1); //block28*84 dim3 threads_intre_c(8,4,2); //block643216 //kernel dim3 grid_intra_c(p_enc->i_slice_num,1,1); dim3 threads_intra_c(2,4,1); hipLaunchKernelGGL(( ChromaPFrameInterResidualCoding_kernel), dim3(grid_intre_c),dim3(threads_intre_c), 0, 0, dev_input_uv, dev_pred_uv, dev_recon_uv+RefStride2BeginUV, dev_dct_coefs_uv, dev_dc_coefs_uv, Quant_tab_uv, Dquant_tab_uv, dev_QpData_uv, enc_width_c, enc_height_c, width_ref_c, height_ref_c, num_mb_hor, num_mb_ver ); end = clock(); p_enc->new_timers.pframe_residual_chroma += (end-start); p_enc->new_timers.pframe_residual_inter += (end-start); p_enc->new_timers.pframe_total += (end -start); start = clock(); hipLaunchKernelGGL(( Chroma_PFrame_Intra_ResidualCoding_kernel), dim3(grid_intra_c),dim3(threads_intra_c), 0, 0, dev_input_uv, dev_recon_uv+RefStride2BeginUV, dev_blk_mb_info, dev_dct_coefs_uv, dev_dc_coefs_uv, Quant_tab_uv, Dquant_tab_uv, dev_QpData_uv, enc_width_c, enc_height_c, width_ref_c, height_ref_c, num_mb_hor, num_mb_ver, p_enc->i_slice_num ); end = clock(); p_enc->new_timers.pframe_residual_chroma += (end-start); p_enc->new_timers.pframe_residual_intra += (end-start); p_enc->new_timers.pframe_total += (end -start); start = clock(); dim3 grid_cbp_luma(num_mb_hor/8, num_mb_ver, 1); dim3 threads_cbp_luma(BLK_WIDTH, BLK_HEIGHT, 8); hipLaunchKernelGGL(( CalcCBP_and_TotalCoeff_Luma_cuda), dim3(grid_cbp_luma),dim3(threads_cbp_luma), 0, 0, dev_dct_coefs, dev_blk_mb_info); dim3 grid_cbp_chroma(num_mb_hor/8, num_mb_ver, 1); dim3 threads_cbp_chroma(BLOCKS_PER_MB_C, 8, 2); hipLaunchKernelGGL(( CalcCBP_and_TotalCoeff_Chroma_cuda), dim3(grid_cbp_chroma),dim3(threads_cbp_chroma), 0, 0, dev_dct_coefs_uv, dev_dc_coefs_uv, dev_blk_mb_info,enc_width_c, enc_height_c); cutilSafeCall(hipMemcpy(p_enc->pBlkMBInfo,dev_blk_mb_info,BLOCKS_PER_MB*num_mbs*sizeof(S_BLK_MB_INFO),hipMemcpyDeviceToHost)); cutilSafeCall(hipFree(dev_pred_uv)); cutilSafeCall(hipFree(dev_ref_uv)); cutilSafeCall(hipFree(dev_input_ref)); cutilSafeCall(hipFree(dev_input_src)); cutilSafeCall(hipFree(dev_out_HR_src)); cutilSafeCall(hipFree(dev_out_HR_ref)); cutilSafeCall(hipFree(dev_out_QR_src)); cutilSafeCall(hipFree(dev_out_QR_ref)); cutilSafeCall(hipFree(dev_mvsLocal)); cutilSafeCall(hipFree(dev_mb_info)); cutilSafeCall(hipFree(IntegerPelCenterVecs)); cutilSafeCall(hipFree(integer_mvmap)); cutilSafeCall(hipFree(mb_info)); cutilSafeCall(hipFree(dev_out_pred)); cutilSafeCall(hipFree(dev_CoeffCosts)); end = clock(); p_enc->new_timers.pframe_residual_chroma += (end-start); p_enc->new_timers.pframe_residual_intra += (end-start); p_enc->new_timers.pframe_total += (end -start); start = clock(); frame_sad_sum = 0; for(j = 0; j < num_mb_ver; j++) { for(i = 0; i < num_mb_hor; i++) { frame_sad_sum += (pBlkMBInfo + (j * num_mb_hor + i) * BLOCKS_PER_MB)->MinSAD; } } 
p_enc->avg_mb_sad = frame_sad_sum / (num_mb_hor * num_mb_ver); end = clock(); p_enc->new_timers.pframe_total += (end -start); } //CAVLC implementation based cuda int I_Slice, FrameQP; bitstream_t *pBitstream; int MBx, MBy; int PrevQP; unsigned int PackedCount; int pPackedSize; unsigned int *pPacked; unsigned int *pPackedCurr; int dummy; int num_encoded_mbs; int MBNum; int *PrevSkipMB = (int *)malloc(p_enc->i_slice_num*sizeof(int)); int *header_bits =(int *)malloc(sizeof(int)) ; int *texture_bits= (int *)malloc(p_enc->i_slice_num*sizeof(int)); ///////////////////////////////////////////////////////////////// // Declare temporary buffers and pointers ///////////////////////////////////////////////////////////////// //int leftover_numbits; unsigned int leftover_value; // Read necessary information from encoder context struct (p_enc) num_encoded_mbs = 0; // Bitstream buffer, before inserting into pBitstream pPackedSize = p_enc->bitstream.buffer_size / 4; pPacked = (unsigned int *)malloc(sizeof(unsigned int) * pPackedSize); pPackedCurr = pPacked; I_Slice = p_enc->frame_info.idr_flag; int Slice_num = p_enc->i_slice_num; *header_bits=0; *texture_bits=0; //leftover_numbits = 0; leftover_value = 0; start = clock(); //short *pDcCoefs_ChromaDC; int *ZigZag_tab; S_CAVLC_CONTEXT_DC_CHROMA *pMBContextOut_LumaDC_dev; S_CAVLC_CONTEXT_DC_CHROMA *pMBContextOut_ChromaDC_dev; int *SkipBlock; int *PrevSkipMB_dev; S_CAVLC_CONTEXT_BLOCK *pMBContextOut_LumaAC_dev; //short *pDctCoefs_ChromaAC; short *pDctCoefs_ZigZag_ChromaAC; S_CAVLC_CONTEXT_DC_CHROMA *pMBContextOut_ChromaAC_dev; S_TEXTURE_SYMBOLS_BLOCK *pTextureSymbols_LumaDC_dev; S_LEVEL_SUFFIX_LENGTH0_SYMBOL_BLOCK *pLevelSymbolSuffixLength0_LumaDC_dev; S_LEVEL_SYMBOLS_BLOCK *pLevelSymbols_LumaDC_dev; S_RUN_SYMBOLS_BLOCK *pRunSymbols_LumaDC_dev; S_TEXTURE_SYMBOLS_BLOCK *pTextureSymbols_LumaAC_dev; S_LEVEL_SUFFIX_LENGTH0_SYMBOL_BLOCK *pLevelSymbolSuffixLength0_LumaAC_dev; S_LEVEL_SYMBOLS_BLOCK *pLevelSymbols_LumaAC_dev; S_RUN_SYMBOLS_BLOCK *pRunSymbols_LumaAC_dev; S_TEXTURE_SYMBOLS_BLOCK *pTextureSymbols_ChromaDC_dev; S_LEVEL_SUFFIX_LENGTH0_SYMBOL_BLOCK *pLevelSymbolSuffixLength0_ChromaDC_dev; S_LEVEL_SYMBOLS_BLOCK *pLevelSymbols_ChromaDC_dev; S_RUN_SYMBOLS_BLOCK *pRunSymbols_ChromaDC_dev; S_TEXTURE_SYMBOLS_BLOCK *pTextureSymbols_ChromaAC_dev; S_LEVEL_SUFFIX_LENGTH0_SYMBOL_BLOCK *pLevelSymbolSuffixLength0_ChromaAC_dev; S_LEVEL_SYMBOLS_BLOCK *pLevelSymbols_ChromaAC_dev; S_RUN_SYMBOLS_BLOCK *pRunSymbols_ChromaAC_dev; SINGLE_CODE *pCodes_LumaDC_dev; SINGLE_CODE *pCodes_LumaAC_dev; SINGLE_CODE *pCodes_ChromaAC_dev; unsigned char *CoeffTokenTable_dev; unsigned char *TotalZerosTable_dev; unsigned int *RunIndexTable_dev; unsigned char *RunTable_dev; cutilSafeCall(hipMalloc((void**) &pCodes_LumaDC_dev,CODE_PAIRS_PER_LANE*num_mbs*sizeof(SINGLE_CODE))); cutilSafeCall(hipMalloc((void**) &pCodes_LumaAC_dev,BLOCKS_PER_MB*CODE_PAIRS_PER_LANE*num_mbs*sizeof(SINGLE_CODE))); cutilSafeCall(hipMalloc((void**) &pCodes_ChromaAC_dev,8*CODE_PAIRS_PER_LANE*num_mbs*sizeof(SINGLE_CODE))); cutilSafeCall(hipMalloc((void**) &CoeffTokenTable_dev,3*4*17*2*sizeof(unsigned char))); cutilSafeCall(hipMalloc((void**) &TotalZerosTable_dev,15*16*2*sizeof(unsigned char))); cutilSafeCall(hipMalloc((void**) &RunIndexTable_dev,7*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &RunTable_dev,44*2*sizeof(unsigned char))); cutilSafeCall(hipMemcpy(CoeffTokenTable_dev,CoeffTokenTable,3*4*17*2*sizeof(unsigned char),hipMemcpyHostToDevice)); 
cutilSafeCall(hipMemcpy(TotalZerosTable_dev,TotalZerosTable,15*16*2*sizeof(unsigned char),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(RunIndexTable_dev,RunIndexTable,7*sizeof(unsigned int),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(RunTable_dev,RunTable,44*2*sizeof(unsigned char),hipMemcpyHostToDevice)); cutilSafeCall(hipMemset(pCodes_LumaDC_dev,0,CODE_PAIRS_PER_LANE*num_mbs*sizeof(SINGLE_CODE))); cutilSafeCall(hipMemset(pCodes_LumaAC_dev,0,16*CODE_PAIRS_PER_LANE*num_mbs*sizeof(SINGLE_CODE))); cutilSafeCall(hipMemset(pCodes_ChromaAC_dev,0,8*CODE_PAIRS_PER_LANE*num_mbs*sizeof(SINGLE_CODE))); cutilSafeCall(hipMalloc((void**) &pMBContextOut_LumaDC_dev,num_mbs*sizeof(S_CAVLC_CONTEXT_DC_CHROMA))); cutilSafeCall(hipMalloc((void**) &pMBContextOut_ChromaDC_dev,num_mbs*sizeof(S_CAVLC_CONTEXT_DC_CHROMA))); //block texture for DC dim3 threads_blk_dc(16, 5, 1); dim3 grid_blk_dc(num_mbs/80, 1, 1); hipLaunchKernelGGL(( cavlc_block_context_DC_kernel) , dim3(grid_blk_dc),dim3(threads_blk_dc), 0, 0, dev_dc_coefs, dev_blk_mb_info, dev_ZigZag/*ZigZag_tab*/, dev_dc_coefs, pMBContextOut_LumaDC_dev, pMBContextOut_ChromaDC_dev, num_mb_hor ); //block contexture for Luma AC cutilSafeCall(hipMalloc((void**) &pMBContextOut_LumaAC_dev,BLOCKS_PER_MB*num_mbs*sizeof(S_CAVLC_CONTEXT_BLOCK))); cutilSafeCall(hipMalloc((void**) &SkipBlock,sizeof(int)*num_mbs)); cutilSafeCall(hipMalloc((void**) &PrevSkipMB_dev,sizeof(int)*Slice_num)); dim3 threads_luma_ac(16, 8, 1); dim3 grid_luma_ac(num_mb_hor/8, num_mb_ver, 1); if(I_Slice ) { cutilSafeCall(hipMemset(SkipBlock,0,sizeof(int)*num_mbs)); cutilSafeCall(hipMemset(PrevSkipMB_dev,0,sizeof(int)*Slice_num)); hipLaunchKernelGGL(( cavlc_block_context_iframe_LumaAC_kernel), dim3(grid_luma_ac),dim3(threads_luma_ac), 0, 0, dev_dct_coefs, dev_blk_mb_info, dev_ZigZag, dev_dct_coefs, pMBContextOut_LumaAC_dev,\ num_mb_hor ); } else { hipLaunchKernelGGL(( cavlc_block_context_iframe_LumaAC_kernel), dim3(grid_luma_ac),dim3(threads_luma_ac), 0, 0, dev_dct_coefs, dev_blk_mb_info, dev_ZigZag, dev_dct_coefs, pMBContextOut_LumaAC_dev, num_mb_hor ); dim3 threads_mv(80, 1, 1); dim3 grid_mv(num_mbs/80, 1, 1); hipLaunchKernelGGL(( CalcPredictedMVRef_16x16_kernel), dim3(grid_mv),dim3(threads_mv), 0, 0, dev_blk_mb_info, pMBContextOut_LumaAC_dev, SkipBlock, num_mb_hor ); dim3 threads_skip(16, 1, 1); dim3 grid_skip(Slice_num, 1, 1); hipLaunchKernelGGL(( cavlc_block_context_PrevSkipMB_kernel), dim3(grid_skip),dim3(threads_skip), 0, 0, SkipBlock, PrevSkipMB_dev, pMBContextOut_LumaAC_dev, num_mbs ); } //block contexture for chroma AC //cutilSafeCall(hipMalloc((void**) &pDctCoefs_ChromaAC,MB_TOTAL_SIZE_C*2*num_mbs*sizeof(short))); cutilSafeCall(hipMalloc((void**) &pDctCoefs_ZigZag_ChromaAC,MB_TOTAL_SIZE_C*2*num_mbs*sizeof(short))); cutilSafeCall(hipMalloc((void**) &pMBContextOut_ChromaAC_dev,BLOCKS_PER_MB_C*2*num_mbs*sizeof(S_CAVLC_CONTEXT_DC_CHROMA))); dim3 threads_blk_chrac(16, 4, 2); dim3 grid_blk_chrac(num_mbs/16, 1, 1); hipLaunchKernelGGL(( cavlc_block_context_ChromaAC_kernel) , dim3(grid_blk_chrac),dim3(threads_blk_chrac), 0, 0, dev_dct_coefs_uv, dev_blk_mb_info, dev_ZigZag, pDctCoefs_ZigZag_ChromaAC, pMBContextOut_ChromaAC_dev, num_mb_hor, num_mb_ver ); //texture symbols for luma DC cutilSafeCall(hipMalloc((void**) &pTextureSymbols_LumaDC_dev,num_mbs*sizeof(S_TEXTURE_SYMBOLS_BLOCK))); cutilSafeCall(hipMalloc((void**) &pLevelSymbolSuffixLength0_LumaDC_dev,num_mbs*sizeof(S_LEVEL_SUFFIX_LENGTH0_SYMBOL_BLOCK))); cutilSafeCall(hipMalloc((void**) 
&pLevelSymbols_LumaDC_dev,BLOCKS_PER_MB*num_mbs*sizeof(S_LEVEL_SYMBOLS_BLOCK))); cutilSafeCall(hipMalloc((void**) &pRunSymbols_LumaDC_dev,BLOCKS_PER_MB*num_mbs*sizeof(S_RUN_SYMBOLS_BLOCK))); dim3 threads_sym_luma_dc(16, 5, 1); dim3 grid_sym_luma_dc(num_mbs/80, 1, 1); hipLaunchKernelGGL(( cavlc_texture_symbols_luma_DC_kernel) , dim3(grid_sym_luma_dc),dim3(threads_sym_luma_dc), 0, 0, dev_dc_coefs, pMBContextOut_LumaDC_dev, SkipBlock, pTextureSymbols_LumaDC_dev, pLevelSymbolSuffixLength0_LumaDC_dev, pLevelSymbols_LumaDC_dev, pRunSymbols_LumaDC_dev ); //texture symbols for luma ac cutilSafeCall(hipMalloc((void**) &pTextureSymbols_LumaAC_dev,num_mbs*BLK_SIZE*sizeof(S_TEXTURE_SYMBOLS_BLOCK))); cutilSafeCall(hipMalloc((void**) &pLevelSymbolSuffixLength0_LumaAC_dev,num_mbs*BLK_SIZE*sizeof(S_LEVEL_SUFFIX_LENGTH0_SYMBOL_BLOCK))); cutilSafeCall(hipMalloc((void**) &pLevelSymbols_LumaAC_dev,BLOCKS_PER_MB*num_mbs*BLK_SIZE*sizeof(S_LEVEL_SYMBOLS_BLOCK))); cutilSafeCall(hipMalloc((void**) &pRunSymbols_LumaAC_dev,BLOCKS_PER_MB*num_mbs*BLK_SIZE*sizeof(S_RUN_SYMBOLS_BLOCK))); dim3 threads_sym_luma_ac(16, 8, 1); dim3 grid_sym_luma_ac(num_mb_hor/8, num_mb_ver, 1); hipLaunchKernelGGL(( cavlc_texture_symbols_luma_AC_kernel) , dim3(grid_sym_luma_ac),dim3(threads_sym_luma_ac), 0, 0, dev_dct_coefs, pMBContextOut_LumaAC_dev, SkipBlock, pTextureSymbols_LumaAC_dev, pLevelSymbolSuffixLength0_LumaAC_dev, pLevelSymbols_LumaAC_dev, pRunSymbols_LumaAC_dev ); cutilSafeCall(hipMalloc((void**) &pTextureSymbols_ChromaDC_dev,num_mbs*2*sizeof(S_TEXTURE_SYMBOLS_BLOCK))); cutilSafeCall(hipMalloc((void**) &pLevelSymbolSuffixLength0_ChromaDC_dev,num_mbs*2*sizeof(S_LEVEL_SUFFIX_LENGTH0_SYMBOL_BLOCK))); cutilSafeCall(hipMalloc((void**) &pLevelSymbols_ChromaDC_dev,BLOCKS_PER_MB_C*2*num_mbs*sizeof(S_LEVEL_SYMBOLS_BLOCK))); cutilSafeCall(hipMalloc((void**) &pRunSymbols_ChromaDC_dev,BLOCKS_PER_MB_C*2*num_mbs*sizeof(S_RUN_SYMBOLS_BLOCK))); dim3 threads_sym_chroma_dc(16, 5, 2); dim3 grid_sym_chroma_dc(num_mbs/80, 1, 1); hipLaunchKernelGGL(( cavlc_texture_symbols_chroma_DC_kernel) , dim3(grid_sym_chroma_dc),dim3(threads_sym_chroma_dc), 0, 0, dev_dc_coefs_uv, pMBContextOut_ChromaDC_dev, SkipBlock, pTextureSymbols_ChromaDC_dev, pLevelSymbolSuffixLength0_ChromaDC_dev, pLevelSymbols_ChromaDC_dev, pRunSymbols_ChromaDC_dev, num_mbs ); //texture symbols for chroma ac cutilSafeCall(hipMalloc((void**) &pTextureSymbols_ChromaAC_dev,BLOCKS_PER_MB_C*num_mbs*2*sizeof(S_TEXTURE_SYMBOLS_BLOCK))); cutilSafeCall(hipMalloc((void**) &pLevelSymbolSuffixLength0_ChromaAC_dev,BLOCKS_PER_MB_C*num_mbs*2*sizeof(S_LEVEL_SUFFIX_LENGTH0_SYMBOL_BLOCK))); cutilSafeCall(hipMalloc((void**) &pLevelSymbols_ChromaAC_dev,MB_TOTAL_SIZE_C*2*num_mbs*sizeof(S_LEVEL_SYMBOLS_BLOCK))); cutilSafeCall(hipMalloc((void**) &pRunSymbols_ChromaAC_dev,MB_TOTAL_SIZE_C*2*num_mbs*sizeof(S_RUN_SYMBOLS_BLOCK))); dim3 threads_sym_chroma_ac(16, 5, 1); dim3 grid_sym_chroma_ac(num_mb_hor/10, num_mb_ver, 1); hipLaunchKernelGGL(( cavlc_texture_symbols_chroma_AC_kernel) , dim3(grid_sym_chroma_ac),dim3(threads_sym_chroma_ac), 0, 0, pDctCoefs_ZigZag_ChromaAC, pMBContextOut_ChromaAC_dev, SkipBlock, pTextureSymbols_ChromaAC_dev, pLevelSymbolSuffixLength0_ChromaAC_dev, pLevelSymbols_ChromaAC_dev, pRunSymbols_ChromaAC_dev ); dim3 threads_code_luma_dc(16, 5, 1); dim3 grid_code_luma_dc(num_mbs/80, 1, 1); hipLaunchKernelGGL(( cavlc_texture_codes_luma_DC_kernel) , dim3(grid_code_luma_dc),dim3(threads_code_luma_dc), 0, 0, pTextureSymbols_LumaDC_dev, pLevelSymbolSuffixLength0_LumaDC_dev, pLevelSymbols_LumaDC_dev, 
pRunSymbols_LumaDC_dev, CoeffTokenTable_dev, TotalZerosTable_dev, RunIndexTable_dev, RunTable_dev, SkipBlock, pCodes_LumaDC_dev, texture_bits, num_mbs ); dim3 threads_code_luma_ac(16, 5, 1); dim3 grid_code_luma_ac(num_mb_hor/5, num_mb_ver, 1); hipLaunchKernelGGL(( cavlc_texture_codes_luma_DC_kernel) , dim3(grid_code_luma_ac),dim3(threads_code_luma_ac), 0, 0, pTextureSymbols_LumaAC_dev, pLevelSymbolSuffixLength0_LumaAC_dev, pLevelSymbols_LumaAC_dev, pRunSymbols_LumaAC_dev, CoeffTokenTable_dev, TotalZerosTable_dev, RunIndexTable_dev, RunTable_dev, SkipBlock, pCodes_LumaAC_dev, texture_bits, num_mbs ); dim3 threads_chr(16, 5, 1); dim3 grid_chr(num_mb_hor/10, num_mb_ver, 1); hipLaunchKernelGGL(( cavlc_texture_codes_luma_DC_kernel) , dim3(grid_chr),dim3(threads_chr), 0, 0, pTextureSymbols_ChromaAC_dev, pLevelSymbolSuffixLength0_ChromaAC_dev, pLevelSymbols_ChromaAC_dev, pRunSymbols_ChromaAC_dev, CoeffTokenTable_dev, TotalZerosTable_dev, RunIndexTable_dev, RunTable_dev, SkipBlock, pCodes_ChromaAC_dev, texture_bits, num_mbs ); SINGLE_CODE *pCodes_ChromaDC_dev; unsigned char *CoeffTokenChromaDCTable_dev; unsigned char *TotalZerosChromaDCTable_dev; cutilSafeCall(hipMalloc((void**) &pCodes_ChromaDC_dev,8*2*num_mbs*sizeof(SINGLE_CODE))); cutilSafeCall(hipMalloc((void**) &CoeffTokenChromaDCTable_dev,4*5*2*sizeof(unsigned char))); cutilSafeCall(hipMalloc((void**) &TotalZerosChromaDCTable_dev,3*4*2*sizeof(unsigned char))); cutilSafeCall(hipMemcpy(CoeffTokenChromaDCTable_dev,CoeffTokenChromaDCTable,4*5*2*sizeof(unsigned char),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(TotalZerosChromaDCTable_dev,TotalZerosChromaDCTable,3*4*2*sizeof(unsigned char),hipMemcpyHostToDevice)); cutilSafeCall(hipMemset(pCodes_ChromaDC_dev,0,8*2*num_mbs*sizeof(SINGLE_CODE))); dim3 threads_code_chroma_dc(16, 10, 1); dim3 grid_code_chroma_dc(num_mbs/80, 1, 1); hipLaunchKernelGGL(( cavlc_texture_codes_chroam_DC_kernel), dim3(grid_code_chroma_dc),dim3(threads_code_chroma_dc), 0, 0, pTextureSymbols_ChromaDC_dev, pLevelSymbolSuffixLength0_ChromaDC_dev, pLevelSymbols_ChromaDC_dev, pRunSymbols_ChromaDC_dev, CoeffTokenChromaDCTable_dev, TotalZerosChromaDCTable_dev, RunIndexTable_dev, RunTable_dev, SkipBlock, pCodes_ChromaDC_dev, texture_bits ); //////////////////////////////////////////////////////////////////////////////////////////////////////////////// cutilSafeCall(hipMemcpy(PrevSkipMB,PrevSkipMB_dev,Slice_num*sizeof(int),hipMemcpyDeviceToHost)); // unsigned char *CBPTable_dev; int *HeaderCodeBits_dev; SINGLE_CODE *pCodes_Header_MB_dev; //SINGLE_CODE *pCodes_Header_MB_dev; unsigned int *packed_words_head_dev; unsigned int *word_count_head_dev; int *leftover_numbits_head_dev; unsigned int *leftover_value_head_dev; unsigned int *packed_words_LDC_dev; unsigned int *word_count_LDC_dev; int *leftover_numbits_LDC_dev; unsigned int *leftover_value_LDC_dev; unsigned int *packed_words_LAC_dev; unsigned int *word_count_LAC_dev; int *leftover_numbits_LAC_dev; unsigned int *leftover_value_LAC_dev; unsigned int *packed_words_CDC_dev; unsigned int *word_count_CDC_dev; int *leftover_numbits_CDC_dev; unsigned int *leftover_value_CDC_dev; unsigned int *packed_words_CAC_dev; unsigned int *word_count_CAC_dev; int *leftover_numbits_CAC_dev; unsigned int *leftover_value_CAC_dev; unsigned int *total_packet_word_mb; unsigned int *total_word_count_mb; int *total_leftover_numbits_mb; unsigned int *total_leftover_value_mb; int *shift_bits_dev; unsigned int *out_index_dev; unsigned int *total_packet_word; int *leftover_numbits_slice; unsigned int 
*leftover_value_slice; unsigned int *word_num_slice; cutilSafeCall(hipMalloc((void**) &CBPTable_dev,CBP_TABLE_SIZE*sizeof(unsigned char))); cutilSafeCall(hipMalloc((void**) &HeaderCodeBits_dev,num_mbs*sizeof( int))); cutilSafeCall(hipMemcpy(CBPTable_dev,CBPTable,CBP_TABLE_SIZE*sizeof(unsigned char),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(dev_ZigZag,BlockScan,16*sizeof(unsigned int),hipMemcpyHostToDevice)); cutilSafeCall(hipMalloc((void**) &pCodes_Header_MB_dev,11*num_mbs*sizeof(SINGLE_CODE))); cutilSafeCall(hipMalloc((void**) &packed_words_head_dev,6*num_mbs*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &word_count_head_dev,num_mbs*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &leftover_numbits_head_dev,num_mbs*sizeof( int))); cutilSafeCall(hipMalloc((void**) &leftover_value_head_dev,num_mbs*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &leftover_numbits_slice,(p_enc->i_slice_num+1)*sizeof (int))); cutilSafeCall(hipMalloc((void**) &leftover_value_slice,p_enc->i_slice_num*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &word_num_slice,p_enc->i_slice_num*sizeof(unsigned int))); cutilSafeCall(hipMemset(pCodes_Header_MB_dev,0,11*num_mbs*sizeof(SINGLE_CODE))); dim3 block_header(num_mb_hor,1,1); dim3 grid_header(num_mb_ver,1,1); int Max_size_head = 11;//P frame mb int shared_mem_size = ((Max_size_head+1)>>1) * num_mb_hor*4; if (I_Slice!=0) //I frame { hipLaunchKernelGGL(( cavlc_header_codes_Iframe) , dim3(grid_header),dim3(block_header), 0, 0, pMBContextOut_LumaAC_dev, dev_ZigZag, CBPTable_dev, 8, pCodes_Header_MB_dev, //8 element for a I MB 1+4+1+1+1(mbtype,subtype(4*4),CHROMAMODE,CBP,delta_quant) HeaderCodeBits_dev ); } else //p frame { hipLaunchKernelGGL(( cavlc_header_codes_Pframe) , dim3(grid_header),dim3(block_header), 0, 0, pMBContextOut_LumaAC_dev, SkipBlock, dev_ZigZag, CBPTable_dev, 11, pCodes_Header_MB_dev, //8 element for a I MB 1+4+1+1+1(mbtype,subtype(4*4),CHROMAMODE,CBP,delta_quant) HeaderCodeBits_dev ); } hipLaunchKernelGGL(( cavlc_bitpack_block_cu), dim3(grid_header),dim3(block_header),shared_mem_size, 0, pCodes_Header_MB_dev, ((I_Slice) ? 
8 : 11), packed_words_head_dev, word_count_head_dev, leftover_numbits_head_dev, leftover_value_head_dev ); int shared_mem_size_ldc = 13*4*num_mb_hor; int shared_mem_size_cdc = 8*4*num_mb_hor; int shared_mem_size_ac = 13*4*128; cutilSafeCall(hipMalloc((void**) &packed_words_LDC_dev,13*num_mbs*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &word_count_LDC_dev,num_mbs*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &leftover_numbits_LDC_dev,num_mbs*sizeof( int))); cutilSafeCall(hipMalloc((void**) &leftover_value_LDC_dev,num_mbs*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &packed_words_LAC_dev,13*num_mbs*BLOCKS_PER_MB*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &word_count_LAC_dev,num_mbs*BLOCKS_PER_MB*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &leftover_numbits_LAC_dev,num_mbs*BLOCKS_PER_MB*sizeof( int))); cutilSafeCall(hipMalloc((void**) &leftover_value_LAC_dev,num_mbs*BLOCKS_PER_MB*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &packed_words_CDC_dev,8*num_mbs*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &word_count_CDC_dev,num_mbs*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &leftover_numbits_CDC_dev,num_mbs*sizeof( int))); cutilSafeCall(hipMalloc((void**) &leftover_value_CDC_dev,num_mbs*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &packed_words_CAC_dev,13*num_mbs*BLOCKS_PER_MB_C*2*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &word_count_CAC_dev,num_mbs*BLOCKS_PER_MB_C*2*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &leftover_numbits_CAC_dev,num_mbs*BLOCKS_PER_MB_C*2*sizeof( int))); cutilSafeCall(hipMalloc((void**) &leftover_value_CAC_dev,num_mbs*BLOCKS_PER_MB_C*2*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &total_packet_word_mb,num_mbs*64*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &total_word_count_mb,num_mbs*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &total_leftover_numbits_mb,num_mbs*sizeof( int))); cutilSafeCall(hipMalloc((void**) &total_leftover_value_mb,num_mbs*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &shift_bits_dev,num_mbs*sizeof(int))); cutilSafeCall(hipMalloc((void**) &out_index_dev,num_mbs*sizeof(unsigned int))); cutilSafeCall(hipMalloc((void**) &total_packet_word,num_mbs*64*sizeof(unsigned int))); cutilSafeCall(hipMemset(total_packet_word_mb,0,num_mbs*64*sizeof(unsigned int))); //kernel dim3 block_ldc(num_mb_hor,1,1); dim3 grid_ldc(num_mb_ver,1,1); //Lumakernel dim3 block_lac(128,1,1); dim3 grid_lac((num_mbs*BLOCKS_PER_MB/128),1,1); //Chromakernel dim3 block_cac(128,1,1); dim3 grid_cac((num_mbs*BLOCKS_PER_MB_C/64),1,1); //Luma Dc hipLaunchKernelGGL(( cavlc_bitpack_block_cu), dim3(grid_ldc),dim3(block_ldc),shared_mem_size_ldc, 0, pCodes_LumaDC_dev, 26, packed_words_LDC_dev, word_count_LDC_dev, leftover_numbits_LDC_dev, leftover_value_LDC_dev ); //Chroma Dc hipLaunchKernelGGL(( cavlc_bitpack_block_cu), dim3(grid_ldc),dim3(block_ldc),shared_mem_size_cdc, 0, pCodes_ChromaDC_dev, 16, packed_words_CDC_dev, word_count_CDC_dev, leftover_numbits_CDC_dev, leftover_value_CDC_dev ); //Luma Ac hipLaunchKernelGGL(( cavlc_bitpack_block_cu), dim3(grid_lac),dim3(block_lac),shared_mem_size_ac, 0, pCodes_LumaAC_dev, 26, packed_words_LAC_dev, word_count_LAC_dev, leftover_numbits_LAC_dev, leftover_value_LAC_dev ); //Chroma Ac hipLaunchKernelGGL(( cavlc_bitpack_block_cu), dim3(grid_cac),dim3(block_cac),shared_mem_size_ac, 0, pCodes_ChromaAC_dev, 26, packed_words_CAC_dev, word_count_CAC_dev, leftover_numbits_CAC_dev, 
leftover_value_CAC_dev ); hipLaunchKernelGGL(( cavlc_bitpack_MB_cu), dim3(grid_ldc),dim3(block_ldc), 0, 0, //input packet codes of head,lumadc,lumaac,chromadc... packed_words_head_dev, packed_words_LDC_dev, packed_words_LAC_dev, packed_words_CDC_dev, packed_words_CAC_dev, word_count_head_dev, word_count_LDC_dev, word_count_LAC_dev, word_count_CDC_dev, word_count_CAC_dev, leftover_numbits_head_dev, leftover_numbits_LDC_dev, leftover_numbits_LAC_dev, leftover_numbits_CDC_dev, leftover_numbits_CAC_dev, leftover_value_head_dev, leftover_value_LDC_dev, leftover_value_LAC_dev, leftover_value_CDC_dev, leftover_value_CAC_dev, dev_ZigZag, 64, ((I_Slice) ? 4 : 6), SkipBlock, //output packet words for mb total_packet_word_mb, total_word_count_mb, total_leftover_numbits_mb, total_leftover_value_mb ); dim3 block(num_mbs/p_enc->i_slice_num,1,1); dim3 grid(p_enc->i_slice_num,1,1); hipLaunchKernelGGL(( compute_out_position), dim3(grid),dim3(block), 0, 0, //input: word of mb and leftover_numbits total_word_count_mb, total_leftover_numbits_mb, //output: out position for mb and shift bits out_index_dev, shift_bits_dev ); hipLaunchKernelGGL(( parallel_write), dim3(grid),dim3(block), 0, 0, total_packet_word_mb, total_word_count_mb, SkipBlock, total_leftover_numbits_mb, total_leftover_value_mb, out_index_dev, shift_bits_dev, num_mbs/p_enc->i_slice_num, //output packet word for slice total_packet_word, word_num_slice, leftover_numbits_slice, leftover_value_slice ); unsigned int *pCodes_packed = (unsigned int*) malloc(BLOCKS_PER_MB*13*num_mbs*sizeof(unsigned int)); unsigned int *word_count = (unsigned int*) malloc(BLOCKS_PER_MB*num_mbs*sizeof(unsigned int)); int *leftover_numbits = ( int*) malloc(BLOCKS_PER_MB*num_mbs*sizeof( int)); unsigned int *left_value = (unsigned int*) malloc(BLOCKS_PER_MB*num_mbs*sizeof(unsigned int)); cutilSafeCall(hipMemcpy(word_count,word_num_slice,(p_enc->i_slice_num)*sizeof(unsigned int),hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(pCodes_packed,total_packet_word,word_count[p_enc->i_slice_num-1]*sizeof(unsigned int),hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(leftover_numbits,leftover_numbits_slice,(p_enc->i_slice_num)*sizeof( int),hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(left_value,leftover_value_slice,p_enc->i_slice_num*sizeof(unsigned int),hipMemcpyDeviceToHost)); pPackedCurr = pCodes_packed; int num_word = 0; for( i = 0;i<p_enc->i_slice_num;i++) { encoder_context_t *penc = p_enc->slice[i]; pBitstream = &penc->bitstream; num_word = word_count[i]-num_word; cavlc_put_bits(pPackedCurr, num_word, pBitstream); if (leftover_numbits[i]) put_bits(pBitstream, leftover_numbits[i], left_value[i] >> (32 - leftover_numbits[i])); pPackedCurr +=num_word; num_word = word_count[i]; *header_bits += write_last_skip_count(PrevSkipMB[i], pBitstream); } start1 = clock(); cutilSafeCall(hipFree(pCodes_Header_MB_dev)); cutilSafeCall(hipFree(packed_words_head_dev)); cutilSafeCall(hipFree(word_count_head_dev)); cutilSafeCall(hipFree(leftover_numbits_head_dev)); cutilSafeCall(hipFree(leftover_value_head_dev)); cutilSafeCall(hipFree(packed_words_LDC_dev)); cutilSafeCall(hipFree(word_count_LDC_dev)); cutilSafeCall(hipFree(leftover_numbits_LDC_dev)); cutilSafeCall(hipFree(leftover_value_LDC_dev)); cutilSafeCall(hipFree(packed_words_LAC_dev)); cutilSafeCall(hipFree(word_count_LAC_dev)); cutilSafeCall(hipFree(leftover_numbits_LAC_dev)); cutilSafeCall(hipFree(leftover_value_LAC_dev)); cutilSafeCall(hipFree(packed_words_CDC_dev)); cutilSafeCall(hipFree(word_count_CDC_dev)); 
cutilSafeCall(hipFree(leftover_numbits_CDC_dev)); cutilSafeCall(hipFree(leftover_value_CDC_dev)); cutilSafeCall(hipFree(packed_words_CAC_dev)); cutilSafeCall(hipFree(word_count_CAC_dev)); cutilSafeCall(hipFree(leftover_numbits_CAC_dev)); cutilSafeCall(hipFree(leftover_value_CAC_dev)); cutilSafeCall(hipFree(out_index_dev)); cutilSafeCall(hipFree(shift_bits_dev)); cutilSafeCall(hipFree(total_packet_word)); cutilSafeCall(hipFree(leftover_numbits_slice)); cutilSafeCall(hipFree(leftover_value_slice)); cutilSafeCall(hipFree(word_num_slice)); cutilSafeCall(hipFree(total_packet_word_mb)); cutilSafeCall(hipFree(total_word_count_mb)); cutilSafeCall(hipFree(total_leftover_numbits_mb)); cutilSafeCall(hipFree(total_leftover_value_mb)); cutilSafeCall(hipFree(HeaderCodeBits_dev)); cutilSafeCall(hipFree(CBPTable_dev)); hipDeviceSynchronize(); end = clock(); p_enc->new_timers.cavlc_timers += (end-start); p_enc->new_timers.rc_total += (end-start); cutilSafeCall(hipFree(pMBContextOut_LumaDC_dev)); cutilSafeCall(hipFree(pMBContextOut_LumaAC_dev)); cutilSafeCall(hipFree(SkipBlock)); cutilSafeCall(hipFree(PrevSkipMB_dev)); cutilSafeCall(hipFree(pMBContextOut_ChromaDC_dev)); cutilSafeCall(hipFree(pDctCoefs_ZigZag_ChromaAC)); cutilSafeCall(hipFree(pMBContextOut_ChromaAC_dev)); cutilSafeCall(hipFree(pTextureSymbols_LumaDC_dev)); cutilSafeCall(hipFree(pLevelSymbolSuffixLength0_LumaDC_dev)); cutilSafeCall(hipFree(pLevelSymbols_LumaDC_dev)); cutilSafeCall(hipFree(pRunSymbols_LumaDC_dev)); cutilSafeCall(hipFree(pTextureSymbols_LumaAC_dev)); cutilSafeCall(hipFree(pLevelSymbolSuffixLength0_LumaAC_dev)); cutilSafeCall(hipFree(pLevelSymbols_LumaAC_dev)); cutilSafeCall(hipFree(pRunSymbols_LumaAC_dev)); cutilSafeCall(hipFree(pTextureSymbols_ChromaDC_dev)); cutilSafeCall(hipFree(pLevelSymbolSuffixLength0_ChromaDC_dev)); cutilSafeCall(hipFree(pLevelSymbols_ChromaDC_dev)); cutilSafeCall(hipFree(pRunSymbols_ChromaDC_dev)); cutilSafeCall(hipFree(pTextureSymbols_ChromaAC_dev)); cutilSafeCall(hipFree(pLevelSymbolSuffixLength0_ChromaAC_dev)); cutilSafeCall(hipFree(pLevelSymbols_ChromaAC_dev)); cutilSafeCall(hipFree(pRunSymbols_ChromaAC_dev)); cutilSafeCall(hipFree(pCodes_LumaDC_dev)); cutilSafeCall(hipFree(pCodes_LumaAC_dev)); cutilSafeCall(hipFree(pCodes_ChromaAC_dev)); cutilSafeCall(hipFree(CoeffTokenTable_dev)); cutilSafeCall(hipFree(TotalZerosTable_dev)); cutilSafeCall(hipFree(RunIndexTable_dev)); cutilSafeCall(hipFree(RunTable_dev)); cutilSafeCall(hipFree(pCodes_ChromaDC_dev)); cutilSafeCall(hipFree(CoeffTokenChromaDCTable_dev)); cutilSafeCall(hipFree(TotalZerosChromaDCTable_dev)); cutilSafeCall(hipFree(dev_input)); cutilSafeCall(hipFree(dev_dct_coefs)); cutilSafeCall(hipFree(dev_dc_coefs)); cutilSafeCall(hipFree(dev_Quant_tab)); cutilSafeCall(hipFree(dev_Dquant_tab)); cutilSafeCall(hipFree(dev_QpData)); cutilSafeCall(hipFree(dev_input_uv)); cutilSafeCall(hipFree(Quant_tab_uv)); cutilSafeCall(hipFree(Dquant_tab_uv)); cutilSafeCall(hipFree(dev_QpData_uv)); cutilSafeCall(hipFree(dev_dct_coefs_uv)); cutilSafeCall(hipFree(dev_dc_coefs_uv)); cutilSafeCall(hipFree(dev_ZigZag)); free(pCodes_packed); free(PrevSkipMB); free(word_count); free(leftover_numbits); free(left_value); end1 = clock(); p_enc->new_timers.prep_encode_frame += (end1 - start1); E_ERR err = ERR_SUCCESS; start = clock(); S_BLK_MB_INFO *pBlkMBInfo = p_enc->pBlkMBInfo; int disable_deblocking_filter_idc = p_enc->loopfilter_params.disable_flag; int slice_alpha_c0_offset = p_enc->loopfilter_params.alpha_c0_offset; int slice_beta_offset = 
p_enc->loopfilter_params.beta_offset; yuv_frame_t *frame = p_enc->pRecFrame; // Input & Output unsigned char *BSRef_d; unsigned char *QP_TO_Chroma_dev; unsigned char *ALPHA_Table_dev; unsigned char *BETA_Table_dev; unsigned char *CLIP_Table_dev; cutilSafeCall(hipMalloc((void**)&BSRef_d,sizeof(unsigned char)*2*BLOCKS_PER_MB*num_mb_hor*num_mb_ver)); cutilSafeCall(hipMalloc((void**)&ALPHA_Table_dev,sizeof(unsigned char)*NUM_QP)); cutilSafeCall(hipMalloc((void**)&BETA_Table_dev,sizeof(unsigned char)*NUM_QP)); cutilSafeCall(hipMalloc((void**)&CLIP_Table_dev,sizeof(unsigned char)*NUM_QP*5)); cutilSafeCall(hipMalloc((void**)&QP_TO_Chroma_dev,sizeof(unsigned char)*NUM_QP)); cutilSafeCall(hipMemcpy(ALPHA_Table_dev,ALPHA_TABLE,sizeof(unsigned char)*NUM_QP,hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(BETA_Table_dev,BETA_TABLE,sizeof(unsigned char)*NUM_QP,hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(CLIP_Table_dev,CLIP_TAB,sizeof(unsigned char)*NUM_QP*5,hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(QP_TO_Chroma_dev,QP_TO_CHROMA_MAPPING,sizeof(unsigned char)*NUM_QP,hipMemcpyHostToDevice)); dim3 dimblock(BLOCKS_PER_MB,8,1); //168MB dim3 dimgrid(num_mb_hor/8,num_mb_ver,1); dim3 block_ver(16,2,1); dim3 grid_ver((num_mb_ver+1)>>1,2,1); dim3 block_hor(16,2,1); dim3 grid_hor(num_mb_hor>>1,2,1); hipLaunchKernelGGL(( cudaCalcBoundaryStrength_kernel), dim3(dimgrid),dim3(dimblock), 0, 0, dev_blk_mb_info, BSRef_d, disable_deblocking_filter_idc, num_mb_hor, num_mb_ver, p_enc->i_slice_num, I_Slice); hipLaunchKernelGGL(( cudaDeblockMB_kernel_ver), dim3(grid_ver),dim3(block_ver), 0, 0, BSRef_d, QP, num_mb_hor, num_mb_ver, width_ref, height_ref, RECON_FRAME_Y_OFFSET* width_ref + RECON_FRAME_X_OFFSET, RECON_FRAME_Y_OFFSET_C * width_ref_c + RECON_FRAME_X_OFFSET_C, dev_recon, dev_recon_uv, dev_recon_uv+(width_ref*height_ref>>2), QP_TO_Chroma_dev, ALPHA_Table_dev, BETA_Table_dev, CLIP_Table_dev ); hipLaunchKernelGGL(( cudaDeblockMB_kernel_hor), dim3(grid_hor),dim3(block_hor), 0, 0, BSRef_d, QP, num_mb_hor, num_mb_ver, width_ref, height_ref, RECON_FRAME_Y_OFFSET* width_ref + RECON_FRAME_X_OFFSET, RECON_FRAME_Y_OFFSET_C * width_ref_c + RECON_FRAME_X_OFFSET_C, dev_recon, dev_recon_uv, dev_recon_uv+(width_ref*height_ref>>2), QP_TO_Chroma_dev, ALPHA_Table_dev, BETA_Table_dev, CLIP_Table_dev ); cutilSafeCall(hipMemcpy(frame->y,dev_recon,width_ref*height_ref*sizeof(unsigned char),hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(frame->u,dev_recon_uv,width_ref*height_ref*sizeof(unsigned char)>>2,hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(frame->v,dev_recon_uv+(width_ref*height_ref>>2),width_ref*height_ref*sizeof(unsigned char)>>2,hipMemcpyDeviceToHost)); cutilSafeCall(hipFree(BSRef_d)); cutilSafeCall(hipFree(QP_TO_Chroma_dev)); cutilSafeCall(hipFree(ALPHA_Table_dev)); cutilSafeCall(hipFree(CLIP_Table_dev)); cutilSafeCall(hipFree(BETA_Table_dev)); cutilSafeCall(hipFree(dev_blk_mb_info)); cutilSafeCall(hipFree(dev_recon)); cutilSafeCall(hipFree(dev_recon_uv)); pad_deblock_out_frame(p_enc->pRecFrame, REFERENCE_FRAME_PAD_AMT); // dec end = clock(); p_enc->new_timers.de_block +=(end - start); }
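/*
 * The cutilSafeCall() wrapper used throughout this encoder comes from the
 * legacy CUTIL sample library. A minimal stand-in with the same
 * abort-on-error behaviour can be written against the plain runtime API;
 * the HIP_CHECK name below is an illustrative sketch, not part of the
 * original sources.
 */
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>

#define HIP_CHECK(call)                                                     \
    do {                                                                    \
        hipError_t err_ = (call);                                           \
        if (err_ != hipSuccess) {                                           \
            fprintf(stderr, "GPU error %s at %s:%d\n",                      \
                    hipGetErrorString(err_), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

/* Usage mirrors the existing calls, e.g.
 *   HIP_CHECK(hipMalloc((void**)&dev_input, MB_TOTAL_SIZE * num_mbs));
 */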
d3e6d7cebda99d8c3ca9831606c6859b893549ef.cu
/* ##################################################################### */ /* */ /* Notice: COPYRIGHT (C) GPU,GROUP. 2010 */ /* THIS PROGRAM IS PROVIDED UNDER THE TERMS OF GPU GROUP */ /* THE PROGRAM MAY ONLY */ /* BE USED IN A MANNER EXPLICITLY SPECIFIED IN THE GPU, */ /* WHICH INCLUDES LIMITATIONS ON COPYING, MODIFYING, */ /* REDISTRIBUTION AND WARRANTIES. UNAUTHORIZED USE OF THIS */ /* PROGRAM IS STRICTLY PROHIBITED. */ /* ##################################################################### */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <cutil_inline.h> #include <cutil_inline_runtime.h> #include <cutil.h> #include "../inc/me_common.h" #include "../inc/me_context.h" #include "../inc/residual_coding.h" #include "../inc/encoder_tables.h" #include "../inc/cavlc_data.h" #include "../inc/cavlc.h" #include "../inc/deblock.h" #include "../inc/h264_common.h" #include "../inc/const_defines.h" #include "../inc/entropy_data.h" #include "../inc/encoder_context.h" #include "../inc/output.h" //include project kernel sources #include "me_LR_search_kernel.cu" #include "me_refinement_kernel.cu" #include "iframe_residual_coding_kernel.cu" #include "iframe_residual_chroma_kernel.cu" #include "pframe_inter_residual_coding_kernel.cu" #include "calc_cbp_and_total_coef.cu" #include "cavlc_block_context_kernel.cu" #include "cavlc_texture_symbols_kernel.cu" #include "cavlc_texture_codes_kernel.cu" #include "cavlc_header_code_kernel.cu" #include "cavlc_bit_pack_kernel.cu" #include "deblock_kernel.cu" ///newly modified functions and kernels #include "intra_coding_kernel.cu" #include "intra_coding_kernel_chroma.cu" #include "pframe_LR_serach_kernel.cu" //encoding entry function for each frame void encode_cuda(encoder_context_t *p_enc ) { clock_t start,end,start1,end1; int enc_width,enc_height,enc_width_c,enc_height_c; int width_ref,height_ref,width_ref_c,height_ref_c; int num_mb_ver, num_mb_hor; int num_mbs; int i, j; int frame_sad_sum; S_QP_DATA QpData; short *pQuantTable; short *pDQuantTable; S_QP_DATA QpData_chroma; short *pQuantTable_chroma; short *pDQuantTable_chroma; start1 = clock(); unsigned char *p_input = p_enc->input_frame.y; unsigned char *p_recon = p_enc->pRecFrame->y; short *p_dct_coefs = p_enc->transform_coefs.pDctCoefs; short *p_dc_coefs = p_enc->transform_coefs.pDcCoefs; int QP = p_enc->frame_info.frame_qp; S_BLK_MB_INFO *p_blk_mb_info = p_enc->pBlkMBInfo; int *p_mb_qps = p_enc->p_mb_qps; int constrained_intra = p_enc->PictureParameterSet.constrained_intra_pred_flag; int intra_pred_select = p_enc->intra_prediction_selection; unsigned char *p_input_u = p_enc->input_frame.u; unsigned char *p_input_v = p_enc->input_frame.v; unsigned char *p_recon_u = p_enc->pRecFrame->u; unsigned char *p_recon_v = p_enc->pRecFrame->v; short *p_dct_coefs_u = p_enc->transform_coefs.pDctCoefs_u; short *p_dct_coefs_v = p_enc->transform_coefs.pDctCoefs_v; short *p_dc_coefs_u = p_enc->transform_coefs.pDcCoefs_u; short *p_dc_coefs_v = p_enc->transform_coefs.pDcCoefs_v; unsigned char *p_rec_ptr; unsigned char *p_rec_u_ptr, *p_rec_v_ptr; enc_width = p_enc->width; enc_height = p_enc->height; enc_width_c = p_enc->width/2; enc_height_c = p_enc->height/2; width_ref = enc_width + 2*REFERENCE_FRAME_PAD_AMT; height_ref = enc_height + 2*REFERENCE_FRAME_PAD_AMT; width_ref_c = width_ref>>1; height_ref_c = height_ref>>1; num_mb_hor = enc_width / MB_WIDTH; num_mb_ver = enc_height / MB_HEIGHT; num_mbs = num_mb_hor*num_mb_ver; p_rec_ptr = p_recon + RECON_FRAME_Y_OFFSET* width_ref + RECON_FRAME_X_OFFSET; p_rec_u_ptr = p_recon_u + RECON_FRAME_Y_OFFSET_C * 
width_ref_c + RECON_FRAME_X_OFFSET_C; p_rec_v_ptr = p_recon_v + RECON_FRAME_Y_OFFSET_C * width_ref_c + RECON_FRAME_X_OFFSET_C; unsigned char *dev_input; unsigned char *dev_recon; S_BLK_MB_INFO *dev_blk_mb_info; S_QP_DATA *dev_QpData; short *dev_dct_coefs; short *dev_dc_coefs; short *dev_Quant_tab; short *dev_Dquant_tab; unsigned char *dev_input_uv; unsigned char *dev_recon_uv; S_QP_DATA *dev_QpData_uv; short *dev_dct_coefs_uv; short *dev_dc_coefs_uv; short *Quant_tab_uv; short *Dquant_tab_uv; int *dev_ZigZag; cutilSafeCall(cudaMalloc((void**) &dev_QpData,sizeof(S_QP_DATA))); cutilSafeCall(cudaMalloc((void**) &dev_QpData_uv,sizeof(S_QP_DATA))); //allocate device memory for the reconstructed data cutilSafeCall(cudaMalloc((void**) &Quant_tab_uv,BLOCKS_PER_MB*sizeof(short))); cutilSafeCall(cudaMalloc((void**) &Dquant_tab_uv,BLOCKS_PER_MB*sizeof(short))); cutilSafeCall(cudaMalloc((void**) &dev_ZigZag,BLOCKS_PER_MB*sizeof(int))); cutilSafeCall(cudaMemcpy(dev_ZigZag,ZigZagScan,16*sizeof(int),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMalloc((void**) &dev_input,MB_WIDTH*MB_HEIGHT*num_mbs)); //allocate device memory for the input data cutilSafeCall(cudaMalloc((void**) &dev_recon,height_ref*width_ref*sizeof(char))); //allocate device memory for the reconstructed data (1088*1952B) cutilSafeCall(cudaMalloc((void**) &dev_recon_uv,height_ref_c*width_ref_c*2)); //allocate device memory for the reconstructed data cutilSafeCall(cudaMalloc((void**) &dev_input_uv,MB_TOTAL_SIZE_C*num_mbs*2)); //allocate device memory for the input data cutilSafeCall(cudaMalloc((void**) &dev_blk_mb_info,BLOCKS_PER_MB*sizeof(S_BLK_MB_INFO)*num_mbs)); cutilSafeCall(cudaMalloc((void**) &dev_dct_coefs,MB_TOTAL_SIZE*num_mbs*sizeof(short))); //allocate device memory for the transform-coded data cutilSafeCall(cudaMalloc((void**) &dev_dc_coefs,BLOCKS_PER_MB*num_mbs*sizeof(short))); //allocate device memory for the transform-coded DC data cutilSafeCall(cudaMalloc((void**) &dev_dct_coefs_uv, MB_TOTAL_SIZE_C*num_mbs*2*sizeof(short))); cutilSafeCall(cudaMalloc((void**) &dev_dc_coefs_uv, BLOCKS_PER_MB_C*num_mbs*2*sizeof(short))); //load the quantization tables into constant memory cutilSafeCall(cudaMalloc((void**) &dev_Quant_tab,BLOCKS_PER_MB*sizeof(short))); //allocate device memory for the transform-coded data cutilSafeCall(cudaMalloc((void**) &dev_Dquant_tab,BLOCKS_PER_MB*sizeof(short))); //allocate device memory for the transform-coded data end1 = clock(); p_enc->new_timers.prep_encode_frame += (end1 - start1); if(p_enc->slice_params.slice_type == SLICE_I) { start = clock(); for (i = 0; i < num_mbs * BLOCKS_PER_MB; i++) { p_blk_mb_info[i].QP = QP; } InitQPDataAndTablesFromQP(&QpData, &pQuantTable, &pDQuantTable, QP, 1, 1); InitQPDataAndTablesFromQP(&QpData_chroma, &pQuantTable_chroma, &pDQuantTable_chroma, p_blk_mb_info->QP, 1, 0); cutilSafeCall(cudaMemcpy(dev_QpData,&QpData,sizeof(S_QP_DATA),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_Quant_tab,pQuantTable,BLOCKS_PER_MB*sizeof(short),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_Dquant_tab,pDQuantTable,BLOCKS_PER_MB*sizeof(short),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_input,p_input,MB_TOTAL_SIZE*num_mbs,cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_blk_mb_info,p_blk_mb_info,BLOCKS_PER_MB*sizeof(S_BLK_MB_INFO)*num_mbs,cudaMemcpyHostToDevice)); dim3 grid(p_enc->i_slice_num, 1, 1); //grid setup dim3 threads(4, 4, 1); //thread setup for each block; limited by the data dependencies of intra coding, only 16 threads can run in parallel. 
dim3 block(4, 4, 16); iframe_luma_residual_coding <<<grid,block>>>(dev_input, enc_width, num_mb_hor, num_mb_ver, dev_recon + RECON_FRAME_Y_OFFSET* width_ref + RECON_FRAME_X_OFFSET, width_ref, dev_dct_coefs, dev_dc_coefs, dev_blk_mb_info, dev_Quant_tab, dev_Dquant_tab, dev_QpData, constrained_intra, intra_pred_select, p_enc->i_slice_num ); cutilSafeCall(cudaMemcpy(dev_input_uv,p_input_u,MB_TOTAL_SIZE_C*num_mbs,cudaMemcpyHostToDevice)); //加载U分量 cutilSafeCall(cudaMemcpy(dev_input_uv+MB_TOTAL_SIZE_C*num_mbs,p_input_v,MB_TOTAL_SIZE_C*num_mbs,cudaMemcpyHostToDevice));//加载V分量 cutilSafeCall(cudaMemcpy(dev_QpData_uv,&QpData_chroma,sizeof(S_QP_DATA),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(Quant_tab_uv,pQuantTable_chroma,BLOCKS_PER_MB*sizeof(short),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(Dquant_tab_uv,pDQuantTable_chroma,BLOCKS_PER_MB*sizeof(short),cudaMemcpyHostToDevice)); dim3 grid_chroma(p_enc->i_slice_num, 1, 1); dim3 threads_chroma(2, 4, 1); dim3 block_chroma(4, 2, 16); iframe_residual_coding_chroam<<<grid_chroma,block_chroma>>>(dev_input_uv, dev_blk_mb_info, dev_QpData_uv, Quant_tab_uv, Dquant_tab_uv, dev_recon_uv+RECON_FRAME_Y_OFFSET_C * width_ref_c + RECON_FRAME_X_OFFSET_C, dev_dct_coefs_uv, dev_dc_coefs_uv, enc_width_c, enc_height_c, width_ref_c, height_ref_c, num_mb_hor, num_mb_ver, p_enc->i_slice_num ); dim3 grid_cbp_luma(num_mb_hor/8, num_mb_ver, 1); dim3 threads_cbp_luma(BLK_WIDTH, BLK_HEIGHT, 8); CalcCBP_and_TotalCoeff_Luma_cuda<<<grid_cbp_luma,threads_cbp_luma>>>(dev_dct_coefs, dev_blk_mb_info); dim3 grid_cbp_chroma(num_mb_hor/8, num_mb_ver, 1); dim3 threads_cbp_chroma(BLOCKS_PER_MB_C, 8, 2); CalcCBP_and_TotalCoeff_Chroma_cuda<<<grid_cbp_chroma,threads_cbp_chroma>>>(dev_dct_coefs_uv, dev_dc_coefs_uv, dev_blk_mb_info, enc_width_c, enc_height_c); cutilSafeCall(cudaMemcpy(p_blk_mb_info,dev_blk_mb_info,BLOCKS_PER_MB*num_mbs*sizeof(S_BLK_MB_INFO),cudaMemcpyDeviceToHost)); frame_sad_sum = 0; for(j = 0; j < num_mb_ver; j++) { for(i = 0; i < num_mb_hor; i++) { frame_sad_sum += (p_enc->pBlkMBInfo + (j * num_mb_hor + i) * BLOCKS_PER_MB)->MinSAD; } } p_enc->avg_mb_sad = frame_sad_sum / (num_mb_hor * num_mb_ver); p_enc->frame_info.num_intra_mb = num_mb_hor * num_mb_ver; end = clock(); p_enc->new_timers.iframe_residual += (end-start); } else { start = clock(); int num_mb_hor_ref,num_mb_ver_ref; int RefStride2Begin; int RefStride2BeginUV; int decimate_ratio = 2; int M = decimate_ratio; RC_CONTEXT *p_rc; ME_CONTEXT *p_me; CUVME_MV_RESULTS *p_me_mv_results; CUVME_MB_INFO *p_me_mb_info; CUVME_Y_BUFFER me_src; CUVME_Y_BUFFER *ptr_me_src; CUVME_MB_CHARAC *p_me_mb_characs; ME_CONTEXT *p_sc_me; ME_context_t *p_ME_context; int do_zero_search = 0; int do_low_res = 0; int do_int_search = 0; int do_int_and_halfpel_search = 0; int do_decimation_for_low_res = 0; unsigned char *i_InputLuma; SINT32 skipbias_factor; int avg_var; CUVME_Y_BUFFER me_ref; CUVME_Y_BUFFER me_pred; unsigned int AvgMbSAD = p_enc->avg_mb_sad; unsigned char *dev_input_ref; unsigned char *dev_out_HR_ref; unsigned char *dev_out_QR_ref; unsigned char *dev_input_src; unsigned char *dev_out_HR_src; unsigned char *dev_out_QR_src; CUVME_MV_RESULTS *dev_mvsLocal; CUVME_MB_INFO *dev_mb_info; unsigned int *IntegerPelCenterVecs; unsigned char *dev_out_pred; CUVME_MV_RESULTS *integer_mvmap; CUVME_MB_INFO *mb_info; int *dev_CoeffCosts; unsigned int lambda_factor_rc; lambda_factor_rc = QP2QUANT_NEW[p_enc->frame_info.frame_qp]; if(p_enc->intra_mb_level == 80) lambda_factor_rc <<= 8; else if(p_enc->intra_mb_level == 90) 
lambda_factor_rc <<= 7; else if(p_enc->intra_mb_level == 100) lambda_factor_rc <<= 5; else lambda_factor_rc <<= p_enc->intra_mb_level; const int ZigZag[16] = {0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15}; const int CoeffCosts[16] = {3,2,2,1,1,1,0,0,0,0,0,0,0,0,0,0}; S_BLK_MB_INFO *pBlkMBInfo; E_ERR err = ERR_SUCCESS; CUVME_ERROR me_err = CUVME_ERR_SUCCESS; num_mb_hor_ref = width_ref/MB_WIDTH; num_mb_ver_ref = height_ref/MB_HEIGHT; RefStride2Begin = ((REFERENCE_FRAME_PAD_AMT * width_ref) + REFERENCE_FRAME_PAD_AMT); RefStride2BeginUV = RECON_FRAME_Y_OFFSET_C * width_ref_c + RECON_FRAME_X_OFFSET_C;/*((REFERENCE_FRAME_PAD_AMT/2 * p_enc->padded_ref_frame.width/2) + REFERENCE_FRAME_PAD_AMT/2);*/ p_me = &p_enc->me_context; p_rc = &p_enc->rc_context; p_me_mv_results = p_me->p_me_mv_results; p_me_mb_info = p_me->p_me_mb_info; p_me_mb_characs = p_me->p_me_mb_characs; err = ERR_SUCCESS; me_err = CUVME_ERR_SUCCESS; ptr_me_src = &me_src; ptr_me_src->y = p_enc->input_frame.y; ptr_me_src->buffer_width = p_enc->input_frame.width; ptr_me_src->buffer_height = p_enc->input_frame.height; ptr_me_src->active_width = p_enc->width; ptr_me_src->active_height = p_enc->height; ptr_me_src->offset_x = 0; ptr_me_src->offset_y = 0; me_pred.y = p_enc->inter_pred_frame.y; me_pred.buffer_width = p_enc->inter_pred_frame.width; me_pred.buffer_height = p_enc->inter_pred_frame.height; me_pred.active_width = p_enc->width; me_pred.active_height = p_enc->height; me_pred.offset_x = 0; me_pred.offset_y = 0; me_ref.y = p_enc->pRefFrame->y; me_ref.buffer_width = p_enc->pRefFrame->width; me_ref.buffer_height = p_enc->pRefFrame->height; me_ref.active_width = p_enc->width; me_ref.active_height = p_enc->height; me_ref.offset_x = REFERENCE_FRAME_PAD_AMT; me_ref.offset_y = REFERENCE_FRAME_PAD_AMT; // Transform and reconstruct InitQPDataAndTablesFromQP(&QpData_chroma, &pQuantTable_chroma, &pDQuantTable_chroma, p_enc->frame_info.frame_qp, 0, 0); InitQPDataAndTablesFromQP (&QpData, &pQuantTable, &pDQuantTable, p_enc->frame_info.frame_qp, 0, 1); p_ME_context = (ME_context_t *)p_me->me_handle; SINT32 lambda_factor; /********** decide between forward or backward reference frame to be used *******************/ // Setting Lambda //TODO: confirm this lambda_factor = QP2QUANT_MELIB[p_ME_context->FrameQP - 12]; skipbias_factor = 1; if(p_ME_context->FrameQP > 42) { skipbias_factor = 2; } me_err = cuvme_set_reference_frame(p_me->me_handle, &me_ref, 0, 0); if (!me_err) { me_err = cuvme_set_predicted_picture(p_me->me_handle, &me_pred); } if (!me_err) { if((p_enc->mb_adapt_qp_on == 1) || (p_enc->intra_mb_level == 0)) { me_err = cuvme_set_return_mb_characteristics(p_me->me_handle, p_me_mb_characs); } } //ME 处理 if(!me_err) { p_ME_context->ptr_src_picture = ptr_me_src; if(p_ME_context->nonpersistent_mem_givenby_app_flag) { me_assign_nonpersistentmem_pointers(p_ME_context); } if(p_ME_context->num_mvs == 1) { p_ME_context->ptr_mvs[0] = p_me_mv_results; } else { p_ME_context->ptr_mvs[0] = p_ME_context->ptr_mvs_local[0]; p_ME_context->ptr_res_out[0] = p_me_mv_results; } p_ME_context->ptr_mb_info[0] = p_me_mb_info; // We need to set all the MVs to zero only for zero mode if((p_ME_context->me_mode < 5) || (p_ME_context->CRef)) { unsigned int NumMbs = (p_ME_context->width / MB_WIDTH) * (p_ME_context->height / MB_HEIGHT); memset(p_ME_context->ptr_mvs[0], 0, NumMbs*sizeof(CUVME_MV_RESULTS)); memset(p_ME_context->ptr_mb_info[0], 0, NumMbs*sizeof(CUVME_MB_INFO)); } if(!p_ME_context->num_lowres_forw_references) { p_ME_context->forw_quarter_res_ref[0] = 
p_ME_context->malloced_forw_quarter_res_ref[0]; p_ME_context->forw_half_res_ref[0] = p_ME_context->malloced_forw_half_res_ref[0]; } if(!p_ME_context->num_lowres_back_references) { p_ME_context->back_quarter_res_ref[0] = p_ME_context->malloced_back_quarter_res_ref[0]; p_ME_context->back_half_res_ref[0] = p_ME_context->malloced_back_half_res_ref[0]; } if(p_ME_context->source_format == RASTER_ORDER) { { me_ConvertRasterToBlockFlattened(ptr_me_src->y, p_ME_context->block_flat_src->y, p_ME_context->width,p_ME_context->height); } p_ME_context->block_flat_src->buffer_width = ptr_me_src->buffer_width; p_ME_context->block_flat_src->buffer_height = ptr_me_src->buffer_height; p_ME_context->block_flat_src->active_width = ptr_me_src->active_width; p_ME_context->block_flat_src->active_height = ptr_me_src->active_height; p_ME_context->block_flat_src->offset_x = ptr_me_src->offset_x; p_ME_context->block_flat_src->offset_y = ptr_me_src->offset_y; p_ME_context->ptr_src_picture = p_ME_context->block_flat_src; } p_ME_context->do_zero_search = 0; p_ME_context->do_low_res = 0; p_ME_context->do_int_search = 0; p_ME_context->do_int_and_halfpel_search = 0; p_ME_context->do_decimation_for_low_res = 0; switch(p_ME_context->me_mode) { case 0: p_ME_context->do_zero_search = 1; do_zero_search = 1; break; case 5: case 10: p_ME_context->do_low_res = 1; do_low_res = 1; break; case 20: case 22: p_ME_context->do_low_res = 1; do_low_res = 1; p_ME_context->do_int_search = 1; do_int_search = 1; break; case 30: case 32: p_ME_context->do_low_res = 1; do_low_res = 1; p_ME_context->do_int_and_halfpel_search = 1; do_int_and_halfpel_search = 1; break; default: break; } if(!p_ME_context->num_lowres_forw_references) { p_ME_context->do_decimation_for_low_res = 1; do_decimation_for_low_res = 1; } if((p_ME_context->FrameQP - 12) > 0) p_ME_context->lambda_factor = QP2QUANT_MELIB[p_ME_context->FrameQP - 12];// else p_ME_context->lambda_factor = QP2QUANT_MELIB[0];// i_InputLuma = p_ME_context->ptr_src_picture->y; end = clock(); p_enc->new_timers.pframe_total +=(end -start); start = clock(); if(do_decimation_for_low_res) { cutilSafeCall(cudaMalloc((void**) &dev_out_HR_ref,(height_ref/M)*(width_ref/M))); //为切分后1/2像素数据分配空间,为输入数据的1/4大小 cutilSafeCall(cudaMalloc((void**) &dev_out_QR_ref,(height_ref/(2*M))*(width_ref/(2*M)))); cutilSafeCall(cudaMalloc((void**) &dev_input_ref,width_ref*height_ref)); //为输入的数据分配显存空间 cutilSafeCall(cudaMemcpy(dev_input_ref,p_ME_context->ptr_forw_ref_frame[0]->y,width_ref*height_ref,cudaMemcpyHostToDevice)); dim3 grid_ref(num_mb_hor_ref>>1,num_mb_ver_ref, 1); //grid的设置 dim3 threads_ref((MB_WIDTH*2)>>1, MB_HEIGHT>>1, 1); me_Decimate_kernel<<<grid_ref,threads_ref>>>(dev_input_ref,dev_out_HR_ref,dev_out_QR_ref,height_ref,width_ref); } if(do_low_res) { cutilSafeCall(cudaMemcpy(dev_input,p_enc->input_frame.y,MB_TOTAL_SIZE*num_mbs*sizeof(unsigned char),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMalloc((void**) &dev_out_HR_src,(enc_width/M)*(enc_height/M))); //为切分后1/2像素数据分配空间,为输入数据的1/4大小 cutilSafeCall(cudaMalloc((void**) &dev_out_QR_src,(enc_width/(2*M))*(enc_height/(2*M)))); cutilSafeCall(cudaMalloc((void**) &dev_mvsLocal,num_mb_hor*num_mb_ver*sizeof(CUVME_MV_RESULTS))); cutilSafeCall(cudaMalloc((void**) &dev_mb_info,num_mbs*sizeof(CUVME_MB_INFO))); //为输入的数据分配显存空间 cutilSafeCall(cudaMemcpy(dev_blk_mb_info,p_enc->pBlkMBInfo,BLOCKS_PER_MB*sizeof(S_BLK_MB_INFO)*num_mbs,cudaMemcpyHostToDevice)); cutilSafeCall(cudaMalloc((void**) &dev_input_src,enc_width*enc_height*sizeof(char))); //为输入的数据分配显存空间 
cutilSafeCall(cudaMemcpy(dev_input_src,i_InputLuma,enc_width*enc_height*sizeof(char),cudaMemcpyHostToDevice)); unsigned int *HR_SAD_dev; cutilSafeCall(cudaMalloc((void**) &HR_SAD_dev,num_mb_hor*num_mb_ver*32*sizeof(unsigned int))); dim3 grid_src(num_mb_hor>>1,num_mb_ver, 1); //grid的设置 dim3 threads_src((MB_WIDTH*2)>>1, MB_HEIGHT>>1, 1); dim3 grid_QR(num_mb_hor/6,num_mb_ver/4,1); dim3 threads_QR(16,16,1); dim3 grid_QR_new(num_mb_hor/6,(num_mb_ver+2)/3,1); dim3 grid_HR_SAD(num_mb_hor,num_mb_ver,1); dim3 threads_HR_SAD(8,4,4); dim3 grid_HR(num_mb_hor/6,num_mb_ver/4,1); dim3 threads_HR(8,4,1); me_Decimate_kernel<<<grid_src,threads_src>>>(dev_input,dev_out_HR_src,dev_out_QR_src,enc_height,enc_width); me_QR_LowresSearch<<<grid_QR_new,threads_QR>>>(dev_out_QR_src, dev_out_QR_ref, dev_mvsLocal, enc_width/LOWRES_DEC_RATIO, enc_height/LOWRES_DEC_RATIO, width_ref/LOWRES_DEC_RATIO, height_ref/LOWRES_DEC_RATIO, num_mb_hor, num_mb_ver, 2*QR_WEIGHT, QR_SEARCH_SIZE, QR_ZERO_BIAS, lambda_factor, skipbias_factor ); me_HR_Cal_Candidate_SAD_kernel<<<grid_HR_SAD,threads_HR_SAD>>>(dev_out_HR_src, dev_out_HR_ref, dev_mvsLocal, enc_width/HLFRES_DEC_RATIO, enc_height/HLFRES_DEC_RATIO, width_ref/HLFRES_DEC_RATIO, height_ref/HLFRES_DEC_RATIO, num_mb_hor, num_mb_ver, HR_SEARCH_SIZE, HR_SAD_dev ); dim3 grid_HR_new(num_mb_hor/6,(num_mb_ver+2)/3,1); me_HR_Candidate_Vote<<<grid_HR_new,threads_HR>>>( HR_SAD_dev, dev_mvsLocal, dev_mb_info, enc_width/HLFRES_DEC_RATIO, enc_height/HLFRES_DEC_RATIO, width_ref/HLFRES_DEC_RATIO, height_ref/HLFRES_DEC_RATIO, num_mb_hor, num_mb_ver, 4*HR_WEIGHT, HR_SEARCH_SIZE, HR_ZERO_BIAS, lambda_factor, (skipbias_factor*3) ); cutilSafeCall(cudaFree(HR_SAD_dev)); } if(do_int_and_halfpel_search||do_int_search) { cutilSafeCall(cudaMalloc((void**) &IntegerPelCenterVecs,num_mb_hor*num_mb_ver*sizeof(int))); cutilSafeCall(cudaMalloc((void**)&dev_out_pred,enc_width*enc_height*sizeof(unsigned char))); cutilSafeCall(cudaMalloc((void**)&integer_mvmap,num_mb_hor*num_mb_ver*sizeof (CUVME_MV_RESULTS))); cutilSafeCall(cudaMalloc((void**)&mb_info,num_mb_hor*num_mb_ver*sizeof(CUVME_MB_INFO))); dim3 grid_MV(1,num_mb_ver,1); dim3 threads_MV(num_mb_hor,1,1); dim3 grid_Int(num_mb_hor,1,1); dim3 threads_Int(16,3,3); me_ClipVec_ForFrame<<<grid_MV,threads_MV>>>(dev_mvsLocal,IntegerPelCenterVecs,p_ME_context->search_range_x, p_ME_context->search_range_y, p_ME_context->candidate_tune_values.integer_clip_range,num_mb_hor,num_mb_ver/*,dev_ref_index,dev_MV*/); //循环处理每一行宏块,因为相邻行宏块之间有数据相关性 me_IntegerSimulsadVote_kernel<<<grid_Int,threads_Int>>>(dev_input_src,dev_input_ref,IntegerPelCenterVecs,integer_mvmap,mb_info,dev_out_pred, num_mb_hor,num_mb_ver,RefStride2Begin,lambda_factor,enc_width,width_ref, dev_blk_mb_info); cutilSafeCall(cudaMemcpy(p_ME_context->p_pred_picture->y,dev_out_pred,enc_width*enc_height*sizeof(unsigned char),cudaMemcpyDeviceToHost)); } p_ME_context->num_lowres_forw_references = 0; p_ME_context->num_lowres_back_references = 0; p_ME_context->LastAvgHRSAD = p_ME_context->AvgHRSAD; p_ME_context->num_sec_formats = 0; if(err == CUVME_ERR_SUCCESS) p_ME_context->curr_state = INIT; p_ME_context->flag_do_mc = 0; p_ME_context->store_dec_src_flag = 0; p_ME_context->get_mb_characs_flag = 0; p_ME_context->num_forw_references = 0; p_ME_context->num_back_references = 0; p_ME_context->ref_frame_distance_fwd = 0; p_ME_context->ref_frame_distance_bwd = 0; } p_sc_me = &p_enc->me_context; pBlkMBInfo = p_enc->pBlkMBInfo; if (!err) { me_err = cuvme_get_avg_var(p_sc_me->me_handle ,&avg_var); if (me_err) { printf ("ME returned 
error code %d", me_err); err = ERR_FAILURE; } if(!err) err = (E_ERR)cuvrc_set_avg_var(p_rc->rc_handle ,avg_var); if (err) { printf ("RC returned error code %d", err); err = ERR_FAILURE; } } end = clock(); p_enc->new_timers.me_total += (end-start); p_enc->new_timers.pframe_total +=(end -start); start = clock(); //P帧亮度分量的编码 cutilSafeCall(cudaMalloc((void**) &dev_CoeffCosts,BLOCKS_PER_MB*sizeof(int))); cutilSafeCall(cudaMemcpy(dev_recon,(p_rec_ptr),enc_height*width_ref*sizeof(char)-RECON_FRAME_X_OFFSET,cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_CoeffCosts,CoeffCosts,sizeof(int)*16,cudaMemcpyHostToDevice)); //将量化表载入常量存储器中 cutilSafeCall(cudaMemcpy(dev_QpData,&QpData,sizeof(S_QP_DATA),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_Quant_tab,pQuantTable,BLOCKS_PER_MB*sizeof(short),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_Dquant_tab,pDQuantTable,BLOCKS_PER_MB*sizeof(short),cudaMemcpyHostToDevice)); dim3 grid_inter((num_mb_hor>>2),num_mb_ver,1); //每一个block处理4个宏块,第一维为每一行宏块应该包含的block数量,第二维是图像的高度(以宏块为单位) dim3 threads_inter(4,4,4); //每一个block有64个线程,第一维对宏块内各个block索引,第二维对宏块进行索引 // cuda kernel dim3 grid_intra(p_enc->i_slice_num,1,1); dim3 threads_intra(4,4,1); //帧间编码 pframe_inter_resudial_coding_luma_kernel<<<grid_inter,threads_inter>>>(dev_input, dev_out_pred, enc_width, dev_recon+RefStride2Begin, width_ref, dev_dct_coefs, dev_Quant_tab, dev_Dquant_tab, dev_QpData, dev_ZigZag, dev_CoeffCosts ); end = clock(); p_enc->new_timers.pframe_residual_inter += (end-start); p_enc->new_timers.pframe_residual_luma += (end-start); p_enc->new_timers.pframe_total += (end -start); start = clock(); //帧内预测编码 dim3 block_intra(4,4,16); pframe_intra_resudial_coding_luma<<<grid_intra,block_intra>>>(dev_input, dev_out_pred, enc_width, dev_recon+RefStride2Begin, width_ref, dev_blk_mb_info, dev_dct_coefs, dev_dc_coefs, dev_Quant_tab, dev_Dquant_tab, dev_QpData, AvgMbSAD, lambda_factor_rc, num_mb_hor, num_mb_ver, p_enc->i_slice_num ); //色度分量的编码过程 end = clock(); p_enc->new_timers.pframe_residual_intra += (end-start); p_enc->new_timers.pframe_residual_luma += (end-start); p_enc->new_timers.pframe_total += (end -start); start = clock(); unsigned char *dev_pred_uv; unsigned char *dev_ref_uv; cutilSafeCall(cudaMalloc((void**) &dev_ref_uv, (width_ref_c)*(height_ref_c)*2*sizeof(unsigned char))); cutilSafeCall(cudaMalloc((void**) &dev_pred_uv, (enc_width_c)*(enc_height_c)*2*sizeof(unsigned char))); cutilSafeCall(cudaMemcpy(dev_ref_uv, p_enc->pRefFrame->u,(width_ref_c)*(height_ref_c)*sizeof(unsigned char),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_ref_uv+(width_ref_c)*(height_ref_c), p_enc->pRefFrame->v,(width_ref_c)*(height_ref_c)*sizeof(unsigned char),cudaMemcpyHostToDevice)); dim3 grid_mcc(num_mb_hor,num_mb_ver,1); dim3 threads_mcc(8,8,2); MotionCompensateChroma_kernel<<<grid_mcc,threads_mcc>>> ( dev_ref_uv, dev_pred_uv, dev_blk_mb_info, enc_width_c, enc_height_c, width_ref_c, height_ref_c, RefStride2BeginUV ); cutilSafeCall(cudaMemcpy(p_enc->inter_pred_frame.u,dev_pred_uv,(enc_width_c)*(enc_height_c)*sizeof(char),cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(p_enc->inter_pred_frame.v, dev_pred_uv+(enc_width_c)*(enc_height_c),(enc_width_c)*(enc_height_c)*sizeof(char),cudaMemcpyDeviceToHost)); end = clock(); p_enc->new_timers.pframe_residual_chroma += (end-start); p_enc->new_timers.pframe_mc += (end-start); p_enc->new_timers.pframe_total += (end -start); start = clock(); cutilSafeCall(cudaMemcpy(dev_input_uv,p_enc->input_frame.u,(enc_width_c)*(enc_height_c),cudaMemcpyHostToDevice)); 
//加载U分量 cutilSafeCall(cudaMemcpy(dev_input_uv+(enc_width_c)*(enc_height_c),p_enc->input_frame.v,(enc_width_c)*(enc_height_c),cudaMemcpyHostToDevice));//加载V分量 cutilSafeCall(cudaMemcpy(dev_QpData_uv,&QpData_chroma,sizeof(S_QP_DATA),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(Quant_tab_uv,pQuantTable_chroma,BLOCKS_PER_MB*sizeof(short),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(Dquant_tab_uv,pDQuantTable_chroma,BLOCKS_PER_MB*sizeof(short),cudaMemcpyHostToDevice)); dim3 grid_intre_c((num_mb_hor>>1),num_mb_ver,1); //一个block处理两种分量的2个8*8宏块,也就是共同4个块 dim3 threads_intre_c(8,4,2); //每一个block有64个线程,前32个线程处理一种分量,每一个宏块需要16个线程 //帧间编码的kernel配置参数 dim3 grid_intra_c(p_enc->i_slice_num,1,1); dim3 threads_intra_c(2,4,1); ChromaPFrameInterResidualCoding_kernel<<<grid_intre_c,threads_intre_c>>> (dev_input_uv, dev_pred_uv, dev_recon_uv+RefStride2BeginUV, dev_dct_coefs_uv, dev_dc_coefs_uv, Quant_tab_uv, Dquant_tab_uv, dev_QpData_uv, enc_width_c, enc_height_c, width_ref_c, height_ref_c, num_mb_hor, num_mb_ver ); end = clock(); p_enc->new_timers.pframe_residual_chroma += (end-start); p_enc->new_timers.pframe_residual_inter += (end-start); p_enc->new_timers.pframe_total += (end -start); start = clock(); Chroma_PFrame_Intra_ResidualCoding_kernel<<<grid_intra_c,threads_intra_c>>> (dev_input_uv, dev_recon_uv+RefStride2BeginUV, dev_blk_mb_info, dev_dct_coefs_uv, dev_dc_coefs_uv, Quant_tab_uv, Dquant_tab_uv, dev_QpData_uv, enc_width_c, enc_height_c, width_ref_c, height_ref_c, num_mb_hor, num_mb_ver, p_enc->i_slice_num ); end = clock(); p_enc->new_timers.pframe_residual_chroma += (end-start); p_enc->new_timers.pframe_residual_intra += (end-start); p_enc->new_timers.pframe_total += (end -start); start = clock(); dim3 grid_cbp_luma(num_mb_hor/8, num_mb_ver, 1); dim3 threads_cbp_luma(BLK_WIDTH, BLK_HEIGHT, 8); CalcCBP_and_TotalCoeff_Luma_cuda<<<grid_cbp_luma,threads_cbp_luma>>>(dev_dct_coefs, dev_blk_mb_info); dim3 grid_cbp_chroma(num_mb_hor/8, num_mb_ver, 1); dim3 threads_cbp_chroma(BLOCKS_PER_MB_C, 8, 2); CalcCBP_and_TotalCoeff_Chroma_cuda<<<grid_cbp_chroma,threads_cbp_chroma>>>(dev_dct_coefs_uv, dev_dc_coefs_uv, dev_blk_mb_info,enc_width_c, enc_height_c); cutilSafeCall(cudaMemcpy(p_enc->pBlkMBInfo,dev_blk_mb_info,BLOCKS_PER_MB*num_mbs*sizeof(S_BLK_MB_INFO),cudaMemcpyDeviceToHost)); cutilSafeCall(cudaFree(dev_pred_uv)); cutilSafeCall(cudaFree(dev_ref_uv)); cutilSafeCall(cudaFree(dev_input_ref)); cutilSafeCall(cudaFree(dev_input_src)); cutilSafeCall(cudaFree(dev_out_HR_src)); cutilSafeCall(cudaFree(dev_out_HR_ref)); cutilSafeCall(cudaFree(dev_out_QR_src)); cutilSafeCall(cudaFree(dev_out_QR_ref)); cutilSafeCall(cudaFree(dev_mvsLocal)); cutilSafeCall(cudaFree(dev_mb_info)); cutilSafeCall(cudaFree(IntegerPelCenterVecs)); cutilSafeCall(cudaFree(integer_mvmap)); cutilSafeCall(cudaFree(mb_info)); cutilSafeCall(cudaFree(dev_out_pred)); cutilSafeCall(cudaFree(dev_CoeffCosts)); end = clock(); p_enc->new_timers.pframe_residual_chroma += (end-start); p_enc->new_timers.pframe_residual_intra += (end-start); p_enc->new_timers.pframe_total += (end -start); start = clock(); frame_sad_sum = 0; for(j = 0; j < num_mb_ver; j++) { for(i = 0; i < num_mb_hor; i++) { frame_sad_sum += (pBlkMBInfo + (j * num_mb_hor + i) * BLOCKS_PER_MB)->MinSAD; } } p_enc->avg_mb_sad = frame_sad_sum / (num_mb_hor * num_mb_ver); end = clock(); p_enc->new_timers.pframe_total += (end -start); } //CAVLC implementation based cuda int I_Slice, FrameQP; bitstream_t *pBitstream; int MBx, MBy; int PrevQP; unsigned int PackedCount; int pPackedSize; unsigned int 
*pPacked; unsigned int *pPackedCurr; int dummy; int num_encoded_mbs; int MBNum; int *PrevSkipMB = (int *)malloc(p_enc->i_slice_num*sizeof(int)); int *header_bits =(int *)malloc(sizeof(int)) ; int *texture_bits= (int *)malloc(p_enc->i_slice_num*sizeof(int)); ///////////////////////////////////////////////////////////////// // Declare temporary buffers and pointers ///////////////////////////////////////////////////////////////// //int leftover_numbits; unsigned int leftover_value; // Read necessary information from encoder context struct (p_enc) num_encoded_mbs = 0; // Bitstream buffer, before inserting into pBitstream pPackedSize = p_enc->bitstream.buffer_size / 4; pPacked = (unsigned int *)malloc(sizeof(unsigned int) * pPackedSize); pPackedCurr = pPacked; I_Slice = p_enc->frame_info.idr_flag; int Slice_num = p_enc->i_slice_num; *header_bits=0; *texture_bits=0; //leftover_numbits = 0; leftover_value = 0; start = clock(); //short *pDcCoefs_ChromaDC; int *ZigZag_tab; S_CAVLC_CONTEXT_DC_CHROMA *pMBContextOut_LumaDC_dev; S_CAVLC_CONTEXT_DC_CHROMA *pMBContextOut_ChromaDC_dev; int *SkipBlock; int *PrevSkipMB_dev; S_CAVLC_CONTEXT_BLOCK *pMBContextOut_LumaAC_dev; //short *pDctCoefs_ChromaAC; short *pDctCoefs_ZigZag_ChromaAC; S_CAVLC_CONTEXT_DC_CHROMA *pMBContextOut_ChromaAC_dev; S_TEXTURE_SYMBOLS_BLOCK *pTextureSymbols_LumaDC_dev; S_LEVEL_SUFFIX_LENGTH0_SYMBOL_BLOCK *pLevelSymbolSuffixLength0_LumaDC_dev; S_LEVEL_SYMBOLS_BLOCK *pLevelSymbols_LumaDC_dev; S_RUN_SYMBOLS_BLOCK *pRunSymbols_LumaDC_dev; S_TEXTURE_SYMBOLS_BLOCK *pTextureSymbols_LumaAC_dev; S_LEVEL_SUFFIX_LENGTH0_SYMBOL_BLOCK *pLevelSymbolSuffixLength0_LumaAC_dev; S_LEVEL_SYMBOLS_BLOCK *pLevelSymbols_LumaAC_dev; S_RUN_SYMBOLS_BLOCK *pRunSymbols_LumaAC_dev; S_TEXTURE_SYMBOLS_BLOCK *pTextureSymbols_ChromaDC_dev; S_LEVEL_SUFFIX_LENGTH0_SYMBOL_BLOCK *pLevelSymbolSuffixLength0_ChromaDC_dev; S_LEVEL_SYMBOLS_BLOCK *pLevelSymbols_ChromaDC_dev; S_RUN_SYMBOLS_BLOCK *pRunSymbols_ChromaDC_dev; S_TEXTURE_SYMBOLS_BLOCK *pTextureSymbols_ChromaAC_dev; S_LEVEL_SUFFIX_LENGTH0_SYMBOL_BLOCK *pLevelSymbolSuffixLength0_ChromaAC_dev; S_LEVEL_SYMBOLS_BLOCK *pLevelSymbols_ChromaAC_dev; S_RUN_SYMBOLS_BLOCK *pRunSymbols_ChromaAC_dev; SINGLE_CODE *pCodes_LumaDC_dev; SINGLE_CODE *pCodes_LumaAC_dev; SINGLE_CODE *pCodes_ChromaAC_dev; unsigned char *CoeffTokenTable_dev; unsigned char *TotalZerosTable_dev; unsigned int *RunIndexTable_dev; unsigned char *RunTable_dev; cutilSafeCall(cudaMalloc((void**) &pCodes_LumaDC_dev,CODE_PAIRS_PER_LANE*num_mbs*sizeof(SINGLE_CODE))); cutilSafeCall(cudaMalloc((void**) &pCodes_LumaAC_dev,BLOCKS_PER_MB*CODE_PAIRS_PER_LANE*num_mbs*sizeof(SINGLE_CODE))); cutilSafeCall(cudaMalloc((void**) &pCodes_ChromaAC_dev,8*CODE_PAIRS_PER_LANE*num_mbs*sizeof(SINGLE_CODE))); cutilSafeCall(cudaMalloc((void**) &CoeffTokenTable_dev,3*4*17*2*sizeof(unsigned char))); cutilSafeCall(cudaMalloc((void**) &TotalZerosTable_dev,15*16*2*sizeof(unsigned char))); cutilSafeCall(cudaMalloc((void**) &RunIndexTable_dev,7*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &RunTable_dev,44*2*sizeof(unsigned char))); cutilSafeCall(cudaMemcpy(CoeffTokenTable_dev,CoeffTokenTable,3*4*17*2*sizeof(unsigned char),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(TotalZerosTable_dev,TotalZerosTable,15*16*2*sizeof(unsigned char),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(RunIndexTable_dev,RunIndexTable,7*sizeof(unsigned int),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(RunTable_dev,RunTable,44*2*sizeof(unsigned char),cudaMemcpyHostToDevice)); 
cutilSafeCall(cudaMemset(pCodes_LumaDC_dev,0,CODE_PAIRS_PER_LANE*num_mbs*sizeof(SINGLE_CODE))); cutilSafeCall(cudaMemset(pCodes_LumaAC_dev,0,16*CODE_PAIRS_PER_LANE*num_mbs*sizeof(SINGLE_CODE))); cutilSafeCall(cudaMemset(pCodes_ChromaAC_dev,0,8*CODE_PAIRS_PER_LANE*num_mbs*sizeof(SINGLE_CODE))); cutilSafeCall(cudaMalloc((void**) &pMBContextOut_LumaDC_dev,num_mbs*sizeof(S_CAVLC_CONTEXT_DC_CHROMA))); cutilSafeCall(cudaMalloc((void**) &pMBContextOut_ChromaDC_dev,num_mbs*sizeof(S_CAVLC_CONTEXT_DC_CHROMA))); //block texture for DC dim3 threads_blk_dc(16, 5, 1); dim3 grid_blk_dc(num_mbs/80, 1, 1); cavlc_block_context_DC_kernel <<<grid_blk_dc,threads_blk_dc>>>(dev_dc_coefs, dev_blk_mb_info, dev_ZigZag/*ZigZag_tab*/, dev_dc_coefs, pMBContextOut_LumaDC_dev, pMBContextOut_ChromaDC_dev, num_mb_hor ); //block contexture for Luma AC cutilSafeCall(cudaMalloc((void**) &pMBContextOut_LumaAC_dev,BLOCKS_PER_MB*num_mbs*sizeof(S_CAVLC_CONTEXT_BLOCK))); cutilSafeCall(cudaMalloc((void**) &SkipBlock,sizeof(int)*num_mbs)); cutilSafeCall(cudaMalloc((void**) &PrevSkipMB_dev,sizeof(int)*Slice_num)); dim3 threads_luma_ac(16, 8, 1); dim3 grid_luma_ac(num_mb_hor/8, num_mb_ver, 1); if(I_Slice ) { cutilSafeCall(cudaMemset(SkipBlock,0,sizeof(int)*num_mbs)); cutilSafeCall(cudaMemset(PrevSkipMB_dev,0,sizeof(int)*Slice_num)); cavlc_block_context_iframe_LumaAC_kernel<<<grid_luma_ac,threads_luma_ac>>> ( dev_dct_coefs, dev_blk_mb_info, dev_ZigZag, dev_dct_coefs, pMBContextOut_LumaAC_dev,\ num_mb_hor ); } else { cavlc_block_context_iframe_LumaAC_kernel<<<grid_luma_ac,threads_luma_ac>>> (dev_dct_coefs, dev_blk_mb_info, dev_ZigZag, dev_dct_coefs, pMBContextOut_LumaAC_dev, num_mb_hor ); dim3 threads_mv(80, 1, 1); dim3 grid_mv(num_mbs/80, 1, 1); CalcPredictedMVRef_16x16_kernel<<<grid_mv,threads_mv>>>( dev_blk_mb_info, pMBContextOut_LumaAC_dev, SkipBlock, num_mb_hor ); dim3 threads_skip(16, 1, 1); dim3 grid_skip(Slice_num, 1, 1); cavlc_block_context_PrevSkipMB_kernel<<<grid_skip,threads_skip>>> ( SkipBlock, PrevSkipMB_dev, pMBContextOut_LumaAC_dev, num_mbs ); } //block contexture for chroma AC //cutilSafeCall(cudaMalloc((void**) &pDctCoefs_ChromaAC,MB_TOTAL_SIZE_C*2*num_mbs*sizeof(short))); cutilSafeCall(cudaMalloc((void**) &pDctCoefs_ZigZag_ChromaAC,MB_TOTAL_SIZE_C*2*num_mbs*sizeof(short))); cutilSafeCall(cudaMalloc((void**) &pMBContextOut_ChromaAC_dev,BLOCKS_PER_MB_C*2*num_mbs*sizeof(S_CAVLC_CONTEXT_DC_CHROMA))); dim3 threads_blk_chrac(16, 4, 2); dim3 grid_blk_chrac(num_mbs/16, 1, 1); cavlc_block_context_ChromaAC_kernel <<<grid_blk_chrac,threads_blk_chrac>>>(dev_dct_coefs_uv, dev_blk_mb_info, dev_ZigZag, pDctCoefs_ZigZag_ChromaAC, pMBContextOut_ChromaAC_dev, num_mb_hor, num_mb_ver ); //texture symbols for luma DC cutilSafeCall(cudaMalloc((void**) &pTextureSymbols_LumaDC_dev,num_mbs*sizeof(S_TEXTURE_SYMBOLS_BLOCK))); cutilSafeCall(cudaMalloc((void**) &pLevelSymbolSuffixLength0_LumaDC_dev,num_mbs*sizeof(S_LEVEL_SUFFIX_LENGTH0_SYMBOL_BLOCK))); cutilSafeCall(cudaMalloc((void**) &pLevelSymbols_LumaDC_dev,BLOCKS_PER_MB*num_mbs*sizeof(S_LEVEL_SYMBOLS_BLOCK))); cutilSafeCall(cudaMalloc((void**) &pRunSymbols_LumaDC_dev,BLOCKS_PER_MB*num_mbs*sizeof(S_RUN_SYMBOLS_BLOCK))); dim3 threads_sym_luma_dc(16, 5, 1); dim3 grid_sym_luma_dc(num_mbs/80, 1, 1); cavlc_texture_symbols_luma_DC_kernel <<<grid_sym_luma_dc,threads_sym_luma_dc>>>(dev_dc_coefs, pMBContextOut_LumaDC_dev, SkipBlock, pTextureSymbols_LumaDC_dev, pLevelSymbolSuffixLength0_LumaDC_dev, pLevelSymbols_LumaDC_dev, pRunSymbols_LumaDC_dev ); //texture symbols for luma ac 
cutilSafeCall(cudaMalloc((void**) &pTextureSymbols_LumaAC_dev,num_mbs*BLK_SIZE*sizeof(S_TEXTURE_SYMBOLS_BLOCK))); cutilSafeCall(cudaMalloc((void**) &pLevelSymbolSuffixLength0_LumaAC_dev,num_mbs*BLK_SIZE*sizeof(S_LEVEL_SUFFIX_LENGTH0_SYMBOL_BLOCK))); cutilSafeCall(cudaMalloc((void**) &pLevelSymbols_LumaAC_dev,BLOCKS_PER_MB*num_mbs*BLK_SIZE*sizeof(S_LEVEL_SYMBOLS_BLOCK))); cutilSafeCall(cudaMalloc((void**) &pRunSymbols_LumaAC_dev,BLOCKS_PER_MB*num_mbs*BLK_SIZE*sizeof(S_RUN_SYMBOLS_BLOCK))); dim3 threads_sym_luma_ac(16, 8, 1); dim3 grid_sym_luma_ac(num_mb_hor/8, num_mb_ver, 1); cavlc_texture_symbols_luma_AC_kernel <<<grid_sym_luma_ac,threads_sym_luma_ac>>>(dev_dct_coefs, pMBContextOut_LumaAC_dev, SkipBlock, pTextureSymbols_LumaAC_dev, pLevelSymbolSuffixLength0_LumaAC_dev, pLevelSymbols_LumaAC_dev, pRunSymbols_LumaAC_dev ); cutilSafeCall(cudaMalloc((void**) &pTextureSymbols_ChromaDC_dev,num_mbs*2*sizeof(S_TEXTURE_SYMBOLS_BLOCK))); cutilSafeCall(cudaMalloc((void**) &pLevelSymbolSuffixLength0_ChromaDC_dev,num_mbs*2*sizeof(S_LEVEL_SUFFIX_LENGTH0_SYMBOL_BLOCK))); cutilSafeCall(cudaMalloc((void**) &pLevelSymbols_ChromaDC_dev,BLOCKS_PER_MB_C*2*num_mbs*sizeof(S_LEVEL_SYMBOLS_BLOCK))); cutilSafeCall(cudaMalloc((void**) &pRunSymbols_ChromaDC_dev,BLOCKS_PER_MB_C*2*num_mbs*sizeof(S_RUN_SYMBOLS_BLOCK))); dim3 threads_sym_chroma_dc(16, 5, 2); dim3 grid_sym_chroma_dc(num_mbs/80, 1, 1); cavlc_texture_symbols_chroma_DC_kernel <<<grid_sym_chroma_dc,threads_sym_chroma_dc>>>(dev_dc_coefs_uv, pMBContextOut_ChromaDC_dev, SkipBlock, pTextureSymbols_ChromaDC_dev, pLevelSymbolSuffixLength0_ChromaDC_dev, pLevelSymbols_ChromaDC_dev, pRunSymbols_ChromaDC_dev, num_mbs ); //texture symbols for chroma ac cutilSafeCall(cudaMalloc((void**) &pTextureSymbols_ChromaAC_dev,BLOCKS_PER_MB_C*num_mbs*2*sizeof(S_TEXTURE_SYMBOLS_BLOCK))); cutilSafeCall(cudaMalloc((void**) &pLevelSymbolSuffixLength0_ChromaAC_dev,BLOCKS_PER_MB_C*num_mbs*2*sizeof(S_LEVEL_SUFFIX_LENGTH0_SYMBOL_BLOCK))); cutilSafeCall(cudaMalloc((void**) &pLevelSymbols_ChromaAC_dev,MB_TOTAL_SIZE_C*2*num_mbs*sizeof(S_LEVEL_SYMBOLS_BLOCK))); cutilSafeCall(cudaMalloc((void**) &pRunSymbols_ChromaAC_dev,MB_TOTAL_SIZE_C*2*num_mbs*sizeof(S_RUN_SYMBOLS_BLOCK))); dim3 threads_sym_chroma_ac(16, 5, 1); dim3 grid_sym_chroma_ac(num_mb_hor/10, num_mb_ver, 1); cavlc_texture_symbols_chroma_AC_kernel <<<grid_sym_chroma_ac,threads_sym_chroma_ac>>>(pDctCoefs_ZigZag_ChromaAC, pMBContextOut_ChromaAC_dev, SkipBlock, pTextureSymbols_ChromaAC_dev, pLevelSymbolSuffixLength0_ChromaAC_dev, pLevelSymbols_ChromaAC_dev, pRunSymbols_ChromaAC_dev ); dim3 threads_code_luma_dc(16, 5, 1); dim3 grid_code_luma_dc(num_mbs/80, 1, 1); cavlc_texture_codes_luma_DC_kernel <<<grid_code_luma_dc,threads_code_luma_dc>>>( pTextureSymbols_LumaDC_dev, pLevelSymbolSuffixLength0_LumaDC_dev, pLevelSymbols_LumaDC_dev, pRunSymbols_LumaDC_dev, CoeffTokenTable_dev, TotalZerosTable_dev, RunIndexTable_dev, RunTable_dev, SkipBlock, pCodes_LumaDC_dev, texture_bits, num_mbs ); dim3 threads_code_luma_ac(16, 5, 1); dim3 grid_code_luma_ac(num_mb_hor/5, num_mb_ver, 1); cavlc_texture_codes_luma_DC_kernel <<<grid_code_luma_ac,threads_code_luma_ac>>>( pTextureSymbols_LumaAC_dev, pLevelSymbolSuffixLength0_LumaAC_dev, pLevelSymbols_LumaAC_dev, pRunSymbols_LumaAC_dev, CoeffTokenTable_dev, TotalZerosTable_dev, RunIndexTable_dev, RunTable_dev, SkipBlock, pCodes_LumaAC_dev, texture_bits, num_mbs ); dim3 threads_chr(16, 5, 1); dim3 grid_chr(num_mb_hor/10, num_mb_ver, 1); cavlc_texture_codes_luma_DC_kernel <<<grid_chr,threads_chr>>>( 
pTextureSymbols_ChromaAC_dev, pLevelSymbolSuffixLength0_ChromaAC_dev, pLevelSymbols_ChromaAC_dev, pRunSymbols_ChromaAC_dev, CoeffTokenTable_dev, TotalZerosTable_dev, RunIndexTable_dev, RunTable_dev, SkipBlock, pCodes_ChromaAC_dev, texture_bits, num_mbs ); SINGLE_CODE *pCodes_ChromaDC_dev; unsigned char *CoeffTokenChromaDCTable_dev; unsigned char *TotalZerosChromaDCTable_dev; cutilSafeCall(cudaMalloc((void**) &pCodes_ChromaDC_dev,8*2*num_mbs*sizeof(SINGLE_CODE))); cutilSafeCall(cudaMalloc((void**) &CoeffTokenChromaDCTable_dev,4*5*2*sizeof(unsigned char))); cutilSafeCall(cudaMalloc((void**) &TotalZerosChromaDCTable_dev,3*4*2*sizeof(unsigned char))); cutilSafeCall(cudaMemcpy(CoeffTokenChromaDCTable_dev,CoeffTokenChromaDCTable,4*5*2*sizeof(unsigned char),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(TotalZerosChromaDCTable_dev,TotalZerosChromaDCTable,3*4*2*sizeof(unsigned char),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemset(pCodes_ChromaDC_dev,0,8*2*num_mbs*sizeof(SINGLE_CODE))); dim3 threads_code_chroma_dc(16, 10, 1); dim3 grid_code_chroma_dc(num_mbs/80, 1, 1); cavlc_texture_codes_chroam_DC_kernel<<<grid_code_chroma_dc,threads_code_chroma_dc>>>( pTextureSymbols_ChromaDC_dev, pLevelSymbolSuffixLength0_ChromaDC_dev, pLevelSymbols_ChromaDC_dev, pRunSymbols_ChromaDC_dev, CoeffTokenChromaDCTable_dev, TotalZerosChromaDCTable_dev, RunIndexTable_dev, RunTable_dev, SkipBlock, pCodes_ChromaDC_dev, texture_bits ); //////////////////////////////////////////////////////////////////////////////////////////////////////////////// cutilSafeCall(cudaMemcpy(PrevSkipMB,PrevSkipMB_dev,Slice_num*sizeof(int),cudaMemcpyDeviceToHost)); //对宏块头进行编码 unsigned char *CBPTable_dev; int *HeaderCodeBits_dev; SINGLE_CODE *pCodes_Header_MB_dev; //SINGLE_CODE *pCodes_Header_MB_dev; unsigned int *packed_words_head_dev; unsigned int *word_count_head_dev; int *leftover_numbits_head_dev; unsigned int *leftover_value_head_dev; unsigned int *packed_words_LDC_dev; unsigned int *word_count_LDC_dev; int *leftover_numbits_LDC_dev; unsigned int *leftover_value_LDC_dev; unsigned int *packed_words_LAC_dev; unsigned int *word_count_LAC_dev; int *leftover_numbits_LAC_dev; unsigned int *leftover_value_LAC_dev; unsigned int *packed_words_CDC_dev; unsigned int *word_count_CDC_dev; int *leftover_numbits_CDC_dev; unsigned int *leftover_value_CDC_dev; unsigned int *packed_words_CAC_dev; unsigned int *word_count_CAC_dev; int *leftover_numbits_CAC_dev; unsigned int *leftover_value_CAC_dev; unsigned int *total_packet_word_mb; unsigned int *total_word_count_mb; int *total_leftover_numbits_mb; unsigned int *total_leftover_value_mb; int *shift_bits_dev; unsigned int *out_index_dev; unsigned int *total_packet_word; int *leftover_numbits_slice; unsigned int *leftover_value_slice; unsigned int *word_num_slice; cutilSafeCall(cudaMalloc((void**) &CBPTable_dev,CBP_TABLE_SIZE*sizeof(unsigned char))); cutilSafeCall(cudaMalloc((void**) &HeaderCodeBits_dev,num_mbs*sizeof( int))); cutilSafeCall(cudaMemcpy(CBPTable_dev,CBPTable,CBP_TABLE_SIZE*sizeof(unsigned char),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_ZigZag,BlockScan,16*sizeof(unsigned int),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMalloc((void**) &pCodes_Header_MB_dev,11*num_mbs*sizeof(SINGLE_CODE))); cutilSafeCall(cudaMalloc((void**) &packed_words_head_dev,6*num_mbs*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &word_count_head_dev,num_mbs*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &leftover_numbits_head_dev,num_mbs*sizeof( int))); 
cutilSafeCall(cudaMalloc((void**) &leftover_value_head_dev,num_mbs*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &leftover_numbits_slice,(p_enc->i_slice_num+1)*sizeof (int))); cutilSafeCall(cudaMalloc((void**) &leftover_value_slice,p_enc->i_slice_num*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &word_num_slice,p_enc->i_slice_num*sizeof(unsigned int))); cutilSafeCall(cudaMemset(pCodes_Header_MB_dev,0,11*num_mbs*sizeof(SINGLE_CODE))); dim3 block_header(num_mb_hor,1,1); dim3 grid_header(num_mb_ver,1,1); int Max_size_head = 11;//P frame mb int shared_mem_size = ((Max_size_head+1)>>1) * num_mb_hor*4; if (I_Slice!=0) //I frame { cavlc_header_codes_Iframe <<<grid_header,block_header>>> ( pMBContextOut_LumaAC_dev, dev_ZigZag, CBPTable_dev, 8, pCodes_Header_MB_dev, //8 element for a I MB 1+4+1+1+1(mbtype,subtype(4*4),CHROMAMODE,CBP,delta_quant) HeaderCodeBits_dev ); } else //p frame { cavlc_header_codes_Pframe <<<grid_header,block_header>>> ( pMBContextOut_LumaAC_dev, SkipBlock, dev_ZigZag, CBPTable_dev, 11, pCodes_Header_MB_dev, //8 element for a I MB 1+4+1+1+1(mbtype,subtype(4*4),CHROMAMODE,CBP,delta_quant) HeaderCodeBits_dev ); } cavlc_bitpack_block_cu<<<grid_header,block_header,shared_mem_size>>> ( pCodes_Header_MB_dev, ((I_Slice) ? 8 : 11), packed_words_head_dev, word_count_head_dev, leftover_numbits_head_dev, leftover_value_head_dev ); int shared_mem_size_ldc = 13*4*num_mb_hor; int shared_mem_size_cdc = 8*4*num_mb_hor; int shared_mem_size_ac = 13*4*128; cutilSafeCall(cudaMalloc((void**) &packed_words_LDC_dev,13*num_mbs*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &word_count_LDC_dev,num_mbs*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &leftover_numbits_LDC_dev,num_mbs*sizeof( int))); cutilSafeCall(cudaMalloc((void**) &leftover_value_LDC_dev,num_mbs*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &packed_words_LAC_dev,13*num_mbs*BLOCKS_PER_MB*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &word_count_LAC_dev,num_mbs*BLOCKS_PER_MB*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &leftover_numbits_LAC_dev,num_mbs*BLOCKS_PER_MB*sizeof( int))); cutilSafeCall(cudaMalloc((void**) &leftover_value_LAC_dev,num_mbs*BLOCKS_PER_MB*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &packed_words_CDC_dev,8*num_mbs*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &word_count_CDC_dev,num_mbs*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &leftover_numbits_CDC_dev,num_mbs*sizeof( int))); cutilSafeCall(cudaMalloc((void**) &leftover_value_CDC_dev,num_mbs*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &packed_words_CAC_dev,13*num_mbs*BLOCKS_PER_MB_C*2*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &word_count_CAC_dev,num_mbs*BLOCKS_PER_MB_C*2*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &leftover_numbits_CAC_dev,num_mbs*BLOCKS_PER_MB_C*2*sizeof( int))); cutilSafeCall(cudaMalloc((void**) &leftover_value_CAC_dev,num_mbs*BLOCKS_PER_MB_C*2*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &total_packet_word_mb,num_mbs*64*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &total_word_count_mb,num_mbs*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &total_leftover_numbits_mb,num_mbs*sizeof( int))); cutilSafeCall(cudaMalloc((void**) &total_leftover_value_mb,num_mbs*sizeof(unsigned int))); cutilSafeCall(cudaMalloc((void**) &shift_bits_dev,num_mbs*sizeof(int))); cutilSafeCall(cudaMalloc((void**) &out_index_dev,num_mbs*sizeof(unsigned int))); 
cutilSafeCall(cudaMalloc((void**) &total_packet_word,num_mbs*64*sizeof(unsigned int))); cutilSafeCall(cudaMemset(total_packet_word_mb,0,num_mbs*64*sizeof(unsigned int))); //直流分量的kernel配置 dim3 block_ldc(num_mb_hor,1,1); dim3 grid_ldc(num_mb_ver,1,1); //交流Luma分量的kernel配置 dim3 block_lac(128,1,1); dim3 grid_lac((num_mbs*BLOCKS_PER_MB/128),1,1); //交流Chroma分量的kernel配置 dim3 block_cac(128,1,1); dim3 grid_cac((num_mbs*BLOCKS_PER_MB_C/64),1,1); //Luma Dc cavlc_bitpack_block_cu<<<grid_ldc,block_ldc,shared_mem_size_ldc>>> ( pCodes_LumaDC_dev, 26, packed_words_LDC_dev, word_count_LDC_dev, leftover_numbits_LDC_dev, leftover_value_LDC_dev ); //Chroma Dc cavlc_bitpack_block_cu<<<grid_ldc,block_ldc,shared_mem_size_cdc>>> ( pCodes_ChromaDC_dev, 16, packed_words_CDC_dev, word_count_CDC_dev, leftover_numbits_CDC_dev, leftover_value_CDC_dev ); //Luma Ac cavlc_bitpack_block_cu<<<grid_lac,block_lac,shared_mem_size_ac>>> ( pCodes_LumaAC_dev, 26, packed_words_LAC_dev, word_count_LAC_dev, leftover_numbits_LAC_dev, leftover_value_LAC_dev ); //Chroma Ac cavlc_bitpack_block_cu<<<grid_cac,block_cac,shared_mem_size_ac>>> ( pCodes_ChromaAC_dev, 26, packed_words_CAC_dev, word_count_CAC_dev, leftover_numbits_CAC_dev, leftover_value_CAC_dev ); cavlc_bitpack_MB_cu<<<grid_ldc,block_ldc>>>( //intput packet codes of head,lumadc,lumaac,chromadc... packed_words_head_dev, packed_words_LDC_dev, packed_words_LAC_dev, packed_words_CDC_dev, packed_words_CAC_dev, word_count_head_dev, word_count_LDC_dev, word_count_LAC_dev, word_count_CDC_dev, word_count_CAC_dev, leftover_numbits_head_dev, leftover_numbits_LDC_dev, leftover_numbits_LAC_dev, leftover_numbits_CDC_dev, leftover_numbits_CAC_dev, leftover_value_head_dev, leftover_value_LDC_dev, leftover_value_LAC_dev, leftover_value_CDC_dev, leftover_value_CAC_dev, dev_ZigZag, 64, ((I_Slice) ? 
4 : 6), SkipBlock, //ouput packet words for mb total_packet_word_mb, total_word_count_mb, total_leftover_numbits_mb, total_leftover_value_mb ); dim3 block(num_mbs/p_enc->i_slice_num,1,1); dim3 grid(p_enc->i_slice_num,1,1); compute_out_position<<<grid,block>>>( //input: word of mb and leftover_numbits total_word_count_mb, total_leftover_numbits_mb, //output: out position for mb and shift bits out_index_dev, shift_bits_dev ); parallel_write<<<grid,block>>>( total_packet_word_mb, total_word_count_mb, SkipBlock, total_leftover_numbits_mb, total_leftover_value_mb, out_index_dev, shift_bits_dev, num_mbs/p_enc->i_slice_num, //out_put packet word for slice total_packet_word, word_num_slice, leftover_numbits_slice, leftover_value_slice ); unsigned int *pCodes_packed = (unsigned int*) malloc(BLOCKS_PER_MB*13*num_mbs*sizeof(unsigned int)); unsigned int *word_count = (unsigned int*) malloc(BLOCKS_PER_MB*num_mbs*sizeof(unsigned int)); int *leftover_numbits = ( int*) malloc(BLOCKS_PER_MB*num_mbs*sizeof( int)); unsigned int *left_value = (unsigned int*) malloc(BLOCKS_PER_MB*num_mbs*sizeof(unsigned int)); cutilSafeCall(cudaMemcpy(word_count,word_num_slice,(p_enc->i_slice_num)*sizeof(unsigned int),cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(pCodes_packed,total_packet_word,word_count[p_enc->i_slice_num-1]*sizeof(unsigned int),cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(leftover_numbits,leftover_numbits_slice,(p_enc->i_slice_num)*sizeof( int),cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(left_value,leftover_value_slice,p_enc->i_slice_num*sizeof(unsigned int),cudaMemcpyDeviceToHost)); pPackedCurr = pCodes_packed; int num_word = 0; for( i = 0;i<p_enc->i_slice_num;i++) { encoder_context_t *penc = p_enc->slice[i]; pBitstream = &penc->bitstream; num_word = word_count[i]-num_word; cavlc_put_bits(pPackedCurr, num_word, pBitstream); if (leftover_numbits[i]) put_bits(pBitstream, leftover_numbits[i], left_value[i] >> (32 - leftover_numbits[i])); pPackedCurr +=num_word; num_word = word_count[i]; header_bits += write_last_skip_count(PrevSkipMB[i], pBitstream); } start1 = clock(); cutilSafeCall(cudaFree(pCodes_Header_MB_dev)); cutilSafeCall(cudaFree(packed_words_head_dev)); cutilSafeCall(cudaFree(word_count_head_dev)); cutilSafeCall(cudaFree(leftover_numbits_head_dev)); cutilSafeCall(cudaFree(leftover_value_head_dev)); cutilSafeCall(cudaFree(packed_words_LDC_dev)); cutilSafeCall(cudaFree(word_count_LDC_dev)); cutilSafeCall(cudaFree(leftover_numbits_LDC_dev)); cutilSafeCall(cudaFree(leftover_value_LDC_dev)); cutilSafeCall(cudaFree(packed_words_LAC_dev)); cutilSafeCall(cudaFree(word_count_LAC_dev)); cutilSafeCall(cudaFree(leftover_numbits_LAC_dev)); cutilSafeCall(cudaFree(leftover_value_LAC_dev)); cutilSafeCall(cudaFree(packed_words_CDC_dev)); cutilSafeCall(cudaFree(word_count_CDC_dev)); cutilSafeCall(cudaFree(leftover_numbits_CDC_dev)); cutilSafeCall(cudaFree(leftover_value_CDC_dev)); cutilSafeCall(cudaFree(packed_words_CAC_dev)); cutilSafeCall(cudaFree(word_count_CAC_dev)); cutilSafeCall(cudaFree(leftover_numbits_CAC_dev)); cutilSafeCall(cudaFree(leftover_value_CAC_dev)); cutilSafeCall(cudaFree(out_index_dev)); cutilSafeCall(cudaFree(shift_bits_dev)); cutilSafeCall(cudaFree(total_packet_word)); cutilSafeCall(cudaFree(leftover_numbits_slice)); cutilSafeCall(cudaFree(leftover_value_slice)); cutilSafeCall(cudaFree(word_num_slice)); cutilSafeCall(cudaFree(total_packet_word_mb)); cutilSafeCall(cudaFree(total_word_count_mb)); cutilSafeCall(cudaFree(total_leftover_numbits_mb)); 
cutilSafeCall(cudaFree(total_leftover_value_mb)); cutilSafeCall(cudaFree(HeaderCodeBits_dev)); cutilSafeCall(cudaFree(CBPTable_dev)); cudaThreadSynchronize(); end = clock(); p_enc->new_timers.cavlc_timers += (end-start); p_enc->new_timers.rc_total += (end-start); cutilSafeCall(cudaFree(pMBContextOut_LumaDC_dev)); cutilSafeCall(cudaFree(pMBContextOut_LumaAC_dev)); cutilSafeCall(cudaFree(SkipBlock)); cutilSafeCall(cudaFree(PrevSkipMB_dev)); cutilSafeCall(cudaFree(pMBContextOut_ChromaDC_dev)); cutilSafeCall(cudaFree(pDctCoefs_ZigZag_ChromaAC)); cutilSafeCall(cudaFree(pMBContextOut_ChromaAC_dev)); cutilSafeCall(cudaFree(pTextureSymbols_LumaDC_dev)); cutilSafeCall(cudaFree(pLevelSymbolSuffixLength0_LumaDC_dev)); cutilSafeCall(cudaFree(pLevelSymbols_LumaDC_dev)); cutilSafeCall(cudaFree(pRunSymbols_LumaDC_dev)); cutilSafeCall(cudaFree(pTextureSymbols_LumaAC_dev)); cutilSafeCall(cudaFree(pLevelSymbolSuffixLength0_LumaAC_dev)); cutilSafeCall(cudaFree(pLevelSymbols_LumaAC_dev)); cutilSafeCall(cudaFree(pRunSymbols_LumaAC_dev)); cutilSafeCall(cudaFree(pTextureSymbols_ChromaDC_dev)); cutilSafeCall(cudaFree(pLevelSymbolSuffixLength0_ChromaDC_dev)); cutilSafeCall(cudaFree(pLevelSymbols_ChromaDC_dev)); cutilSafeCall(cudaFree(pRunSymbols_ChromaDC_dev)); cutilSafeCall(cudaFree(pTextureSymbols_ChromaAC_dev)); cutilSafeCall(cudaFree(pLevelSymbolSuffixLength0_ChromaAC_dev)); cutilSafeCall(cudaFree(pLevelSymbols_ChromaAC_dev)); cutilSafeCall(cudaFree(pRunSymbols_ChromaAC_dev)); cutilSafeCall(cudaFree(pCodes_LumaDC_dev)); cutilSafeCall(cudaFree(pCodes_LumaAC_dev)); cutilSafeCall(cudaFree(pCodes_ChromaAC_dev)); cutilSafeCall(cudaFree(CoeffTokenTable_dev)); cutilSafeCall(cudaFree(TotalZerosTable_dev)); cutilSafeCall(cudaFree(RunIndexTable_dev)); cutilSafeCall(cudaFree(RunTable_dev)); cutilSafeCall(cudaFree(pCodes_ChromaDC_dev)); cutilSafeCall(cudaFree(CoeffTokenChromaDCTable_dev)); cutilSafeCall(cudaFree(TotalZerosChromaDCTable_dev)); cutilSafeCall(cudaFree(dev_input)); cutilSafeCall(cudaFree(dev_dct_coefs)); cutilSafeCall(cudaFree(dev_dc_coefs)); cutilSafeCall(cudaFree(dev_Quant_tab)); cutilSafeCall(cudaFree(dev_Dquant_tab)); cutilSafeCall(cudaFree(dev_QpData)); cutilSafeCall(cudaFree(dev_input_uv)); cutilSafeCall(cudaFree(Quant_tab_uv)); cutilSafeCall(cudaFree(Dquant_tab_uv)); cutilSafeCall(cudaFree(dev_QpData_uv)); cutilSafeCall(cudaFree(dev_dct_coefs_uv)); cutilSafeCall(cudaFree(dev_dc_coefs_uv)); cutilSafeCall(cudaFree(dev_ZigZag)); free(pCodes_packed); free(PrevSkipMB); free(word_count); free(leftover_numbits); free(left_value); end1 = clock(); p_enc->new_timers.prep_encode_frame += (end1 - start1); E_ERR err = ERR_SUCCESS; start = clock(); S_BLK_MB_INFO *pBlkMBInfo = p_enc->pBlkMBInfo; int disable_deblocking_filter_idc = p_enc->loopfilter_params.disable_flag; int slice_alpha_c0_offset = p_enc->loopfilter_params.alpha_c0_offset; int slice_beta_offset = p_enc->loopfilter_params.beta_offset; yuv_frame_t *frame = p_enc->pRecFrame; // Input & Output unsigned char *BSRef_d; unsigned char *QP_TO_Chroma_dev; unsigned char *ALPHA_Table_dev; unsigned char *BETA_Table_dev; unsigned char *CLIP_Table_dev; cutilSafeCall(cudaMalloc((void**)&BSRef_d,sizeof(unsigned char)*2*BLOCKS_PER_MB*num_mb_hor*num_mb_ver)); cutilSafeCall(cudaMalloc((void**)&ALPHA_Table_dev,sizeof(unsigned char)*NUM_QP)); cutilSafeCall(cudaMalloc((void**)&BETA_Table_dev,sizeof(unsigned char)*NUM_QP)); cutilSafeCall(cudaMalloc((void**)&CLIP_Table_dev,sizeof(unsigned char)*NUM_QP*5)); cutilSafeCall(cudaMalloc((void**)&QP_TO_Chroma_dev,sizeof(unsigned 
char)*NUM_QP)); cutilSafeCall(cudaMemcpy(ALPHA_Table_dev,ALPHA_TABLE,sizeof(unsigned char)*NUM_QP,cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(BETA_Table_dev,BETA_TABLE,sizeof(unsigned char)*NUM_QP,cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(CLIP_Table_dev,CLIP_TAB,sizeof(unsigned char)*NUM_QP*5,cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(QP_TO_Chroma_dev,QP_TO_CHROMA_MAPPING,sizeof(unsigned char)*NUM_QP,cudaMemcpyHostToDevice)); dim3 dimblock(BLOCKS_PER_MB,8,1); //一个线程处理一个边的一条边界的横向和纵向边界强度,一个宏块需要的线程数为16,每个线程块处理8个MB dim3 dimgrid(num_mb_hor/8,num_mb_ver,1); dim3 block_ver(16,2,1); dim3 grid_ver((num_mb_ver+1)>>1,2,1); dim3 block_hor(16,2,1); dim3 grid_hor(num_mb_hor>>1,2,1); cudaCalcBoundaryStrength_kernel<<<dimgrid,dimblock>>> (dev_blk_mb_info, BSRef_d, disable_deblocking_filter_idc, num_mb_hor, num_mb_ver, p_enc->i_slice_num, I_Slice); cudaDeblockMB_kernel_ver<<<grid_ver,block_ver>>> ( BSRef_d, QP, num_mb_hor, num_mb_ver, width_ref, height_ref, RECON_FRAME_Y_OFFSET* width_ref + RECON_FRAME_X_OFFSET, RECON_FRAME_Y_OFFSET_C * width_ref_c + RECON_FRAME_X_OFFSET_C, dev_recon, dev_recon_uv, dev_recon_uv+(width_ref*height_ref>>2), QP_TO_Chroma_dev, ALPHA_Table_dev, BETA_Table_dev, CLIP_Table_dev ); cudaDeblockMB_kernel_hor<<<grid_hor,block_hor>>> ( BSRef_d, QP, num_mb_hor, num_mb_ver, width_ref, height_ref, RECON_FRAME_Y_OFFSET* width_ref + RECON_FRAME_X_OFFSET, RECON_FRAME_Y_OFFSET_C * width_ref_c + RECON_FRAME_X_OFFSET_C, dev_recon, dev_recon_uv, dev_recon_uv+(width_ref*height_ref>>2), QP_TO_Chroma_dev, ALPHA_Table_dev, BETA_Table_dev, CLIP_Table_dev ); cutilSafeCall(cudaMemcpy(frame->y,dev_recon,width_ref*height_ref*sizeof(unsigned char),cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(frame->u,dev_recon_uv,width_ref*height_ref*sizeof(unsigned char)>>2,cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(frame->v,dev_recon_uv+(width_ref*height_ref>>2),width_ref*height_ref*sizeof(unsigned char)>>2,cudaMemcpyDeviceToHost)); cutilSafeCall(cudaFree(BSRef_d)); cutilSafeCall(cudaFree(QP_TO_Chroma_dev)); cutilSafeCall(cudaFree(ALPHA_Table_dev)); cutilSafeCall(cudaFree(CLIP_Table_dev)); cutilSafeCall(cudaFree(BETA_Table_dev)); cutilSafeCall(cudaFree(dev_blk_mb_info)); cutilSafeCall(cudaFree(dev_recon)); cutilSafeCall(cudaFree(dev_recon_uv)); pad_deblock_out_frame(p_enc->pRecFrame, REFERENCE_FRAME_PAD_AMT); // dec end = clock(); p_enc->new_timers.de_block +=(end - start); }
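Every stage of the encoder above follows the same host-side sequence: allocate device buffers with cudaMalloc, stage inputs with cudaMemcpy, launch one or more kernels, copy results back, then free, with most runtime calls wrapped in the SDK's cutilSafeCall. The following is a minimal, self-contained sketch of that sequence with an equivalent check macro; the kernel and buffer names are illustrative only and are not taken from the encoder.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Minimal stand-in for cutilSafeCall: abort with file/line on any runtime error.
#define CUDA_CHECK(call)                                              \
    do {                                                              \
        cudaError_t e = (call);                                       \
        if (e != cudaSuccess) {                                       \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,        \
                    cudaGetErrorString(e));                           \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

// Toy per-element kernel standing in for one coding stage (illustrative only).
__global__ void scale_kernel(float *buf, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        buf[i] *= factor;
}

int main()
{
    const int n = 1 << 16;
    float *h_buf = (float *)malloc(n * sizeof(float));
    for (int i = 0; i < n; ++i) h_buf[i] = (float)i;

    float *d_buf = NULL;
    CUDA_CHECK(cudaMalloc((void **)&d_buf, n * sizeof(float)));                      // allocate
    CUDA_CHECK(cudaMemcpy(d_buf, h_buf, n * sizeof(float), cudaMemcpyHostToDevice)); // upload

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    scale_kernel<<<grid, block>>>(d_buf, 0.5f, n);                                   // launch
    CUDA_CHECK(cudaGetLastError());                                                  // catch launch errors

    CUDA_CHECK(cudaMemcpy(h_buf, d_buf, n * sizeof(float), cudaMemcpyDeviceToHost)); // download (synchronizes)
    CUDA_CHECK(cudaFree(d_buf));                                                     // release

    printf("h_buf[2] = %f\n", h_buf[2]);   // expect 1.0 after scaling by 0.5
    free(h_buf);
    return 0;
}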
9410def65873bb17314c7422a6ccde4dd4c1dab1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cutil_inline.h>
#include <sys/time.h>

__global__ void addMatrix(float *C, float *A, float *B, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = i + j * N;
    if (i < N && j < N)
        C[idx] = A[idx] + B[idx];
}

void call_addMatrix(float *C, float *A, float *B, int N)
{
    static int first_time = 1;
    if (first_time) {
        hipSetDevice(cutGetMaxGflopsDeviceId());
        first_time = 0;
    }

    int devID;
    hipDeviceProp_t props;
    cutilSafeCall(hipGetDevice(&devID));
    cutilSafeCall(hipGetDeviceProperties(&props, devID));
    printf("Device %d: \"%s\" with Compute %d.%d capability\n",
           devID, props.name, props.major, props.minor);

    float* d_A;
    cutilSafeCall(hipMalloc((void**) &d_A, sizeof(*A) * N * N));
    float* d_B;
    cutilSafeCall(hipMalloc((void**) &d_B, sizeof(*B) * N * N));
    cutilSafeCall(hipMemcpy(d_A, A, sizeof(*A) * N * N, hipMemcpyHostToDevice));
    cutilSafeCall(hipMemcpy(d_B, B, sizeof(*B) * N * N, hipMemcpyHostToDevice));
    float* d_C;
    cutilSafeCall(hipMalloc((void**) &d_C, sizeof(*C) * N * N));

    dim3 dimBlock(16, 16);
    dim3 dimGrid(N / dimBlock.x, N / dimBlock.y);

    hipLaunchKernelGGL((addMatrix), dim3(dimGrid), dim3(dimBlock), 0, 0, d_C, d_A, d_B, N);

    struct timeval time_start;
    gettimeofday(&time_start, NULL);
    for (int i = 1024; i != 0; --i) {
        hipLaunchKernelGGL((addMatrix), dim3(dimGrid), dim3(dimBlock), 0, 0, d_C, d_A, d_B, N);
    }
    hipDeviceSynchronize();
    cutilCheckMsg("Kernel execution failed");
    struct timeval time_end;
    gettimeofday(&time_end, NULL);

    double time_float = (time_end.tv_sec - time_start.tv_sec)
                      + (time_end.tv_usec - time_start.tv_usec) * 0.000001;
    printf("CUDA completed 1024 runs in %f\n", time_float);

    cutilSafeCall(hipMemcpy(C, d_C, sizeof(*C) * N * N, hipMemcpyDeviceToHost));

    cutilSafeCall(hipFree(d_A));
    cutilSafeCall(hipFree(d_B));
    cutilSafeCall(hipFree(d_C));
}
9410def65873bb17314c7422a6ccde4dd4c1dab1.cu
#include <cutil_inline.h>
#include <sys/time.h>

__global__ void addMatrix(float *C, float *A, float *B, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = i + j * N;
    if (i < N && j < N)
        C[idx] = A[idx] + B[idx];
}

void call_addMatrix(float *C, float *A, float *B, int N)
{
    static int first_time = 1;
    if (first_time) {
        cudaSetDevice(cutGetMaxGflopsDeviceId());
        first_time = 0;
    }

    int devID;
    cudaDeviceProp props;
    cutilSafeCall(cudaGetDevice(&devID));
    cutilSafeCall(cudaGetDeviceProperties(&props, devID));
    printf("Device %d: \"%s\" with Compute %d.%d capability\n",
           devID, props.name, props.major, props.minor);

    float* d_A;
    cutilSafeCall(cudaMalloc((void**) &d_A, sizeof(*A) * N * N));
    float* d_B;
    cutilSafeCall(cudaMalloc((void**) &d_B, sizeof(*B) * N * N));
    cutilSafeCall(cudaMemcpy(d_A, A, sizeof(*A) * N * N, cudaMemcpyHostToDevice));
    cutilSafeCall(cudaMemcpy(d_B, B, sizeof(*B) * N * N, cudaMemcpyHostToDevice));
    float* d_C;
    cutilSafeCall(cudaMalloc((void**) &d_C, sizeof(*C) * N * N));

    dim3 dimBlock(16, 16);
    dim3 dimGrid(N / dimBlock.x, N / dimBlock.y);

    addMatrix<<<dimGrid, dimBlock>>>(d_C, d_A, d_B, N);

    struct timeval time_start;
    gettimeofday(&time_start, NULL);
    for (int i = 1024; i != 0; --i) {
        addMatrix<<<dimGrid, dimBlock>>>(d_C, d_A, d_B, N);
    }
    cudaThreadSynchronize();
    cutilCheckMsg("Kernel execution failed");
    struct timeval time_end;
    gettimeofday(&time_end, NULL);

    double time_float = (time_end.tv_sec - time_start.tv_sec)
                      + (time_end.tv_usec - time_start.tv_usec) * 0.000001;
    printf("CUDA completed 1024 runs in %f\n", time_float);

    cutilSafeCall(cudaMemcpy(C, d_C, sizeof(*C) * N * N, cudaMemcpyDeviceToHost));

    cutilSafeCall(cudaFree(d_A));
    cutilSafeCall(cudaFree(d_B));
    cutilSafeCall(cudaFree(d_C));
}
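One visible difference in this pair: the .cu file waits for the timed launches with cudaThreadSynchronize(), while the hipified file uses hipDeviceSynchronize(). cudaThreadSynchronize() is deprecated in CUDA in favour of cudaDeviceSynchronize(), which is the direct counterpart of the HIP call. The following is a small sketch of the same timing idiom with the non-deprecated API; the kernel and sizes are illustrative, not taken from the file above.

#include <cstdio>
#include <sys/time.h>
#include <cuda_runtime.h>

// Placeholder per-element work standing in for addMatrix.
__global__ void busy_kernel(float *p, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) p[i] = p[i] * 2.0f + 1.0f;
}

int main()
{
    const int n = 1 << 20;
    float *d_p = NULL;
    cudaMalloc((void **)&d_p, n * sizeof(float));
    cudaMemset(d_p, 0, n * sizeof(float));

    dim3 block(256), grid((n + 255) / 256);

    struct timeval t0, t1;
    gettimeofday(&t0, NULL);
    for (int i = 0; i < 1024; ++i)
        busy_kernel<<<grid, block>>>(d_p, n);
    cudaDeviceSynchronize();   // wait for all queued launches; replaces deprecated cudaThreadSynchronize()
    gettimeofday(&t1, NULL);

    double secs = (t1.tv_sec - t0.tv_sec) + (t1.tv_usec - t0.tv_usec) * 1e-6;
    printf("1024 launches took %f s\n", secs);

    cudaFree(d_p);
    return 0;
}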
351bb43a42f07eb851128cd49b58dc933467f23e.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <string.h> #include <time.h> #include <math.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> using namespace std; int index(int i) { return i + 1; } // Blocksize #define BLOCKSIZE 64 // Number of mesh points int n = 60000; //************************************************* // Swap two pointers to float // ************************************************ void swap_pointers(float **a, float **b) { float *tmp = *a; *a = *b; *b = tmp; } //************************************************* // GLOBAL MEMORY VERSION OF THE FD UPDATE // ************************************************ __global__ void FD_kernel1(float *d_phi, float *d_phi_new, float cu, int n) { int i = threadIdx.x + blockDim.x * blockIdx.x + 1; // Inner point update if (i < n + 2) d_phi_new[i] = 0.5 * ((d_phi[i + 1] + d_phi[i - 1]) - cu * (d_phi[i + 1] - d_phi[i - 1])); // Boundary Conditions if (i == 1) d_phi_new[0] = d_phi_new[1]; if (i == n + 1) d_phi_new[n + 2] = d_phi_new[n + 1]; } //************************************************* // TILING VERSION (USES SHARED MEMORY) OF THE FD UPDATE // ************************************************ __global__ void FD_kernel2(float *d_phi, float *d_phi_new, float cu, int n) { int li = threadIdx.x + 1; //local index in shared memory vector int gi = blockDim.x * blockIdx.x + threadIdx.x + 1; // global memory index int lstart = 0; int lend = BLOCKSIZE + 1; __shared__ float s_phi[BLOCKSIZE + 2]; //shared mem. vector float result; // Load Tile in shared memory if (gi < n + 2) s_phi[li] = d_phi[gi]; if (threadIdx.x == 0) // First Thread (in the current block) s_phi[lstart] = d_phi[gi - 1]; if (threadIdx.x == BLOCKSIZE - 1) // Last Thread if (gi >= n + 1) // Last Block s_phi[(n + 2) % BLOCKSIZE] = d_phi[n + 2]; else s_phi[lend] = d_phi[gi + 1]; __syncthreads(); if (gi < n + 2) { // Lax-Friedrichs Update result = 0.5 * ((s_phi[li + 1] + s_phi[li - 1]) - cu * (s_phi[li + 1] - s_phi[li - 1])); d_phi_new[gi] = result; } // Boundary Conditions if (gi == 1) d_phi_new[0] = d_phi_new[1]; if (gi == n + 1) d_phi_new[n + 2] = d_phi_new[n + 1]; } //****************************** //**** MAIN FUNCTION *********** int main(int argc, char *argv[]) { //****************************** //Get GPU information int devID; hipDeviceProp_t props; hipError_t err; err = hipGetDevice(&devID); if (err != hipSuccess) { cout << "ERRORRR" << endl; } hipGetDeviceProperties(&props, devID); printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor); cout << "Introduce number of points (1000-200000)" << endl; cin >> n; // Domain size (periodic) float l = 10.0; // Grid float dx = l / n; // Advecting velocity float u = 1.0; //Timestep size float dt = 0.8 * u * dx; float tend = 2.5; // Courant number float cu = u * dt / dx; //Number of steps to take int nsteps = (int)ceil(tend / dt); cout << "dx=" << dx << "... 
dt= " << dt << "...Courant= " << cu << endl; cout << endl; cout << "Number of time steps=" << nsteps << endl; //Mesh Definition blockDim.x*blockIdx.x float *phi = new float[n + 3]; float *phi_new = new float[n + 3]; float *phi_GPU = new float[n + 3]; float xx[n + 1]; for (int i = 0; i <= n; i++) xx[i] = -5.0 + i * dx; // Initial values for phi--> Gaussian for (int i = 0; i <= n; i++) { // Gaussian phi[index(i)] = (1.0 / (2.0 * M_PI * 0.16)) * exp(-0.5 * (pow((xx[i] - 0.5), 2) / 0.01)); } //************************** // GPU phase //************************** int size = (n + 3) * sizeof(float); // Allocation in device mem. for d_phi float *d_phi = NULL; err = hipMalloc((void **)&d_phi, size); if (err != hipSuccess) { cout << "ALLOCATION ERROR" << endl; } // Allocation in device mem. for d_phi_new float *d_phi_new = NULL; err = hipMalloc((void **)&d_phi_new, size); if (err != hipSuccess) { cout << "ALLOCATION ERROR" << endl; } // Take initial time double t1 = clock(); // Impose Boundary Conditions phi[index(-1)] = phi[index(0)]; phi[index(n + 1)] = phi[index(n)]; // Copy phi values to device memory err = hipMemcpy(d_phi, phi, size, hipMemcpyHostToDevice); if (err != hipSuccess) { cout << "GPU COPY ERROR" << endl; } // ******************* // Time Step Iteration // ******************* for (int k = 0; k < nsteps; k++) { int blocksPerGrid = (int)ceil((float)(n + 1) / BLOCKSIZE); // ********* Kernel Launch ************************************ hipLaunchKernelGGL(( FD_kernel2), dim3(blocksPerGrid), dim3(BLOCKSIZE), 0, 0, d_phi, d_phi_new, cu, n); // ************************************************************ err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch kernel! %d \n", err); exit(EXIT_FAILURE); } swap_pointers(&d_phi, &d_phi_new); } hipMemcpy(phi_GPU, d_phi, size, hipMemcpyDeviceToHost); double Tgpu = clock(); Tgpu = (Tgpu - t1) / CLOCKS_PER_SEC; //************************** // CPU phase //************************** double t1cpu = clock(); for (int k = 0; k < nsteps; k++) { // Impose Boundary Conditions phi[index(-1)] = phi[index(0)]; phi[index(n + 1)] = phi[index(n)]; for (int i = 0; i <= n; i++) { float phi_i = phi[index(i)]; float phi_ip1 = phi[index(i + 1)]; float phi_im1 = phi[index(i - 1)]; //Lax-Friedrichs phi_new[index(i)] = 0.5 * ((phi_ip1 + phi_im1) - cu * (phi_ip1 - phi_im1)); } swap_pointers(&phi, &phi_new); } double Tcpu = clock(); Tcpu = (Tcpu - t1cpu) / CLOCKS_PER_SEC; cout << endl; cout << "GPU Time= " << Tgpu << endl << endl; cout << "CPU Time= " << Tcpu << endl << endl; //************************** // CPU-GPU comparison and error checking //************************** int passed = 1; int i = 0; while (passed && i < n) { double diff = fabs((double)phi_GPU[index(i)] - (double)phi[index(i)]); if (diff > 1.0e-5) { passed = 0; cout << "DIFF= " << diff << endl; } i++; } if (passed) cout << "PASSED TEST !!!" << endl; else cout << "ERROR IN TEST !!!" << endl; cout << endl; cout << "Speedup (T_CPU/T_GPU)= " << Tcpu / Tgpu << endl; return 0; }
351bb43a42f07eb851128cd49b58dc933467f23e.cu
#include <iostream> #include <fstream> #include <string.h> #include <time.h> #include <math.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> using namespace std; int index(int i) { return i + 1; } // Blocksize #define BLOCKSIZE 64 // Number of mesh points int n = 60000; //************************************************* // Swap two pointers to float // ************************************************ void swap_pointers(float **a, float **b) { float *tmp = *a; *a = *b; *b = tmp; } //************************************************* // GLOBAL MEMORY VERSION OF THE FD UPDATE // ************************************************ __global__ void FD_kernel1(float *d_phi, float *d_phi_new, float cu, int n) { int i = threadIdx.x + blockDim.x * blockIdx.x + 1; // Inner point update if (i < n + 2) d_phi_new[i] = 0.5 * ((d_phi[i + 1] + d_phi[i - 1]) - cu * (d_phi[i + 1] - d_phi[i - 1])); // Boundary Conditions if (i == 1) d_phi_new[0] = d_phi_new[1]; if (i == n + 1) d_phi_new[n + 2] = d_phi_new[n + 1]; } //************************************************* // TILING VERSION (USES SHARED MEMORY) OF THE FD UPDATE // ************************************************ __global__ void FD_kernel2(float *d_phi, float *d_phi_new, float cu, int n) { int li = threadIdx.x + 1; //local index in shared memory vector int gi = blockDim.x * blockIdx.x + threadIdx.x + 1; // global memory index int lstart = 0; int lend = BLOCKSIZE + 1; __shared__ float s_phi[BLOCKSIZE + 2]; //shared mem. vector float result; // Load Tile in shared memory if (gi < n + 2) s_phi[li] = d_phi[gi]; if (threadIdx.x == 0) // First Thread (in the current block) s_phi[lstart] = d_phi[gi - 1]; if (threadIdx.x == BLOCKSIZE - 1) // Last Thread if (gi >= n + 1) // Last Block s_phi[(n + 2) % BLOCKSIZE] = d_phi[n + 2]; else s_phi[lend] = d_phi[gi + 1]; __syncthreads(); if (gi < n + 2) { // Lax-Friedrichs Update result = 0.5 * ((s_phi[li + 1] + s_phi[li - 1]) - cu * (s_phi[li + 1] - s_phi[li - 1])); d_phi_new[gi] = result; } // Boundary Conditions if (gi == 1) d_phi_new[0] = d_phi_new[1]; if (gi == n + 1) d_phi_new[n + 2] = d_phi_new[n + 1]; } //****************************** //**** MAIN FUNCTION *********** int main(int argc, char *argv[]) { //****************************** //Get GPU information int devID; cudaDeviceProp props; cudaError_t err; err = cudaGetDevice(&devID); if (err != cudaSuccess) { cout << "ERRORRR" << endl; } cudaGetDeviceProperties(&props, devID); printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor); cout << "Introduce number of points (1000-200000)" << endl; cin >> n; // Domain size (periodic) float l = 10.0; // Grid float dx = l / n; // Advecting velocity float u = 1.0; //Timestep size float dt = 0.8 * u * dx; float tend = 2.5; // Courant number float cu = u * dt / dx; //Number of steps to take int nsteps = (int)ceil(tend / dt); cout << "dx=" << dx << "... 
dt= " << dt << "...Courant= " << cu << endl; cout << endl; cout << "Number of time steps=" << nsteps << endl; //Mesh Definition blockDim.x*blockIdx.x float *phi = new float[n + 3]; float *phi_new = new float[n + 3]; float *phi_GPU = new float[n + 3]; float xx[n + 1]; for (int i = 0; i <= n; i++) xx[i] = -5.0 + i * dx; // Initial values for phi--> Gaussian for (int i = 0; i <= n; i++) { // Gaussian phi[index(i)] = (1.0 / (2.0 * M_PI * 0.16)) * exp(-0.5 * (pow((xx[i] - 0.5), 2) / 0.01)); } //************************** // GPU phase //************************** int size = (n + 3) * sizeof(float); // Allocation in device mem. for d_phi float *d_phi = NULL; err = cudaMalloc((void **)&d_phi, size); if (err != cudaSuccess) { cout << "ALLOCATION ERROR" << endl; } // Allocation in device mem. for d_phi_new float *d_phi_new = NULL; err = cudaMalloc((void **)&d_phi_new, size); if (err != cudaSuccess) { cout << "ALLOCATION ERROR" << endl; } // Take initial time double t1 = clock(); // Impose Boundary Conditions phi[index(-1)] = phi[index(0)]; phi[index(n + 1)] = phi[index(n)]; // Copy phi values to device memory err = cudaMemcpy(d_phi, phi, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { cout << "GPU COPY ERROR" << endl; } // ******************* // Time Step Iteration // ******************* for (int k = 0; k < nsteps; k++) { int blocksPerGrid = (int)ceil((float)(n + 1) / BLOCKSIZE); // ********* Kernel Launch ************************************ FD_kernel2<<<blocksPerGrid, BLOCKSIZE>>>(d_phi, d_phi_new, cu, n); // ************************************************************ err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch kernel! %d \n", err); exit(EXIT_FAILURE); } swap_pointers(&d_phi, &d_phi_new); } cudaMemcpy(phi_GPU, d_phi, size, cudaMemcpyDeviceToHost); double Tgpu = clock(); Tgpu = (Tgpu - t1) / CLOCKS_PER_SEC; //************************** // CPU phase //************************** double t1cpu = clock(); for (int k = 0; k < nsteps; k++) { // Impose Boundary Conditions phi[index(-1)] = phi[index(0)]; phi[index(n + 1)] = phi[index(n)]; for (int i = 0; i <= n; i++) { float phi_i = phi[index(i)]; float phi_ip1 = phi[index(i + 1)]; float phi_im1 = phi[index(i - 1)]; //Lax-Friedrichs phi_new[index(i)] = 0.5 * ((phi_ip1 + phi_im1) - cu * (phi_ip1 - phi_im1)); } swap_pointers(&phi, &phi_new); } double Tcpu = clock(); Tcpu = (Tcpu - t1cpu) / CLOCKS_PER_SEC; cout << endl; cout << "GPU Time= " << Tgpu << endl << endl; cout << "CPU Time= " << Tcpu << endl << endl; //************************** // CPU-GPU comparison and error checking //************************** int passed = 1; int i = 0; while (passed && i < n) { double diff = fabs((double)phi_GPU[index(i)] - (double)phi[index(i)]); if (diff > 1.0e-5) { passed = 0; cout << "DIFF= " << diff << endl; } i++; } if (passed) cout << "PASSED TEST !!!" << endl; else cout << "ERROR IN TEST !!!" << endl; cout << endl; cout << "Speedup (T_CPU/T_GPU)= " << Tcpu / Tgpu << endl; return 0; }
b52e7b7f852e71bec3c0a220749aae20467a1989.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * File for cuda_usage (and compiler) tests.
 */
extern "C" {

#include<cstdio>

__device__ void thread_print(int tID, int bID){ // thread id, block id
    char tabs[] = {'\t', '\t', '\t', '\t','\t', '\t', '\t', '\t'};
    tabs[bID%8] = 0;
    printf("%s%4d\n",tabs,tID);
}

__global__ void cuFun(int x){
    int tID = (threadIdx.z * blockDim.y * blockDim.x) + (threadIdx.y * blockDim.x) + threadIdx.x;
    if(tID & 127) // print every 128th
        return;
    int bID = (blockIdx.z * gridDim.y * gridDim.x) + (blockIdx.y * gridDim.x) + blockIdx.x;
    thread_print(tID, bID);
}

}
b52e7b7f852e71bec3c0a220749aae20467a1989.cu
/*
 * File for cuda_usage (and compiler) tests.
 */
extern "C" {

#include<cstdio>

__device__ void thread_print(int tID, int bID){ // thread id, block id
    char tabs[] = {'\t', '\t', '\t', '\t','\t', '\t', '\t', '\t'};
    tabs[bID%8] = 0;
    printf("%s%4d\n",tabs,tID);
}

__global__ void cuFun(int x){
    int tID = (threadIdx.z * blockDim.y * blockDim.x) + (threadIdx.y * blockDim.x) + threadIdx.x;
    if(tID & 127) // print every 128th
        return;
    int bID = (blockIdx.z * gridDim.y * gridDim.x) + (blockIdx.y * gridDim.x) + blockIdx.x;
    thread_print(tID, bID);
}

}
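cuFun above flattens the 3-D thread and block indices into single IDs, prints only every 128th thread, and indents each line by bID%8 tab stops. A possible host-side launcher is sketched below; the grid and block shapes are assumptions, not taken from the files, and the two translation units would need to be linked with relocatable device code (e.g. nvcc -rdc=true).

#include <cstdio>
#include <cuda_runtime.h>

extern "C" __global__ void cuFun(int x);   // defined in the file above

int main()
{
    dim3 block(8, 8, 4);      // 256 threads per block -> flattened tIDs 0..255, so tID 0 and 128 print
    dim3 grid(2, 2, 1);       // 4 blocks -> flattened bIDs 0..3, i.e. 0..3 tabs of indentation
    cuFun<<<grid, block>>>(0);
    cudaDeviceSynchronize();  // device-side printf output is flushed at synchronization
    return 0;
}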
5d70716f700b5acaf51fed9201ba19d5000cc2b0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>   /* malloc, free, atoi */

/* kernel routine starts with keyword __global__ */
__global__ void vecadd(float* A, float* B, float* C)
{
    int i = threadIdx.x;   // threadIdx is a CUDA built-in variable
    C[i] = A[i] + B[i];
}

int main(int argc, char * argv[])
{
    float *host_A, *host_B, *host_C;
    float *dev_A, *dev_B, *dev_C;
    int n;

    if (argc == 1) n = 1024;
    else n = atoi(argv[1]);

    /* 1. allocate host memory */
    host_A = (float*)malloc( n*sizeof(float) );
    host_B = (float*)malloc( n*sizeof(float) );
    host_C = (float*)malloc( n*sizeof(float) );

    /* 2. allocate GPU memory */
    hipMalloc( &dev_A, n*sizeof(float) );
    hipMalloc( &dev_B, n*sizeof(float) );
    hipMalloc( &dev_C, n*sizeof(float) );

    /* initialize array A and B */
    for( int i = 0; i < n; ++i ) {
        host_A[i] = (float) i;
        host_B[i] = (float) i;
    }

    /* 3. Copy data (host_A and host_B) to GPU */
    hipMemcpy( dev_A, host_A, n*sizeof(float), hipMemcpyHostToDevice );
    hipMemcpy( dev_B, host_B, n*sizeof(float), hipMemcpyHostToDevice );

    /* 4. call kernel routine to execute on GPU */
    /* launch 1 thread per vector-element, 1024 threads per block */
    hipLaunchKernelGGL(( vecadd), dim3(1),dim3(n), 0, 0, dev_A, dev_B, dev_C );

    /* transfer results from GPU (dev_C) to CPU (host_C) */
    hipMemcpy( host_C, dev_C, n*sizeof(float), hipMemcpyDeviceToHost );

    /* free host and GPU memory */
    free(host_A);
    free(host_B);
    free(host_C);
    hipFree(dev_A);
    hipFree(dev_B);
    hipFree(dev_C);
    return( 0 );
}
5d70716f700b5acaf51fed9201ba19d5000cc2b0.cu
#include <stdlib.h>   /* malloc, free, atoi */

/* kernel routine starts with keyword __global__ */
__global__ void vecadd(float* A, float* B, float* C)
{
    int i = threadIdx.x;   // threadIdx is a CUDA built-in variable
    C[i] = A[i] + B[i];
}

int main(int argc, char * argv[])
{
    float *host_A, *host_B, *host_C;
    float *dev_A, *dev_B, *dev_C;
    int n;

    if (argc == 1) n = 1024;
    else n = atoi(argv[1]);

    /* 1. allocate host memory */
    host_A = (float*)malloc( n*sizeof(float) );
    host_B = (float*)malloc( n*sizeof(float) );
    host_C = (float*)malloc( n*sizeof(float) );

    /* 2. allocate GPU memory */
    cudaMalloc( &dev_A, n*sizeof(float) );
    cudaMalloc( &dev_B, n*sizeof(float) );
    cudaMalloc( &dev_C, n*sizeof(float) );

    /* initialize array A and B */
    for( int i = 0; i < n; ++i ) {
        host_A[i] = (float) i;
        host_B[i] = (float) i;
    }

    /* 3. Copy data (host_A and host_B) to GPU */
    cudaMemcpy( dev_A, host_A, n*sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( dev_B, host_B, n*sizeof(float), cudaMemcpyHostToDevice );

    /* 4. call kernel routine to execute on GPU */
    /* launch 1 thread per vector-element, 1024 threads per block */
    vecadd<<<1,n>>>( dev_A, dev_B, dev_C );

    /* transfer results from GPU (dev_C) to CPU (host_C) */
    cudaMemcpy( host_C, dev_C, n*sizeof(float), cudaMemcpyDeviceToHost );

    /* free host and GPU memory */
    free(host_A);
    free(host_B);
    free(host_C);
    cudaFree(dev_A);
    cudaFree(dev_B);
    cudaFree(dev_C);
    return( 0 );
}
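The vecadd kernel above is launched as vecadd<<<1,n>>>, i.e. a single block, so it only covers n up to the usual 1024-threads-per-block limit. A minimal multi-block variant with an explicit bounds check is sketched below; it is not part of the file, and the block size of 256 is just an illustrative choice.

__global__ void vecadd_blocked(const float* A, const float* B, float* C, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // one thread per element across all blocks
    if (i < n)                                      // guard the partially filled tail block
        C[i] = A[i] + B[i];
}

// launch:
//   int threads = 256;
//   int blocks  = (n + threads - 1) / threads;
//   vecadd_blocked<<<blocks, threads>>>(dev_A, dev_B, dev_C, n);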
4cce870b56d4d8f7e2ff2577943118cbe7551a6b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include<stdio.h> #include<iostream> #include<math.h> #include<time.h> //namespace for standard lib. using namespace std; //Calculate error value at each position __global__ void calculate_error_points(float *totalError, double *b, double *m, float *pointX, float *pointY, int num_points) { int i = threadIdx.x; for (i; i<num_points; i++) { totalError[i] = powf((pointY[i] - (*m * pointX[i] + *b)), 2.0); } } //calculate for distance between line and points float compute_error_for_line_given_points(double *b, double *m, float *pointX, float *pointY, int num_points) { float pointError[100] = { 0.0, }; float totalError = 0.0; float ret = 0.0; float *d_pointX, *d_pointY, *d_pointError; double *d_b, *d_m; //alocate GPU memory hipMalloc(&d_pointX, 100 * sizeof(float)); hipMalloc(&d_pointY, 100 * sizeof(float)); hipMalloc(&d_pointError, 100 * sizeof(float)); hipMalloc(&d_b, sizeof(double)); hipMalloc(&d_m, sizeof(double)); //copy CPU data to GPU memory hipMemcpy(d_pointX, pointX, 100 * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_pointY, pointY, 100 * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_pointError, pointError, 100 * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b, b, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_m, m, sizeof(double), hipMemcpyHostToDevice); //caluation error between line and points calculate_error_points << <1, 100 >> > (d_pointError, d_b, d_m, d_pointX, d_pointY, num_points); //getback data from GPU to CPU hipMemcpy(pointError, d_pointError, 100 * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i < 100; i++) { totalError += pointError[i]; } ret = (totalError / float(num_points)); //free memory hipFree(d_pointX); hipFree(d_pointY); hipFree(d_pointError); hipFree(d_b); hipFree(d_m); return ret; } //calculation gradient for updating weight - for backpropagation. 
__global__ void calculate_step_gradient(float *d_b_gradient, float *d_m_gradient, float *d_N, double *d_new_b, double *d_new_m, double *d_b_current, double *d_m_current, float *d_pointX, float *d_pointY, float *d_learningRate, int num_points) { int i = threadIdx.x; for (int i; i < num_points; i++) { *d_b_gradient += -(2 / (*d_N)) * (d_pointY[i] - ((*d_m_current * d_pointX[i]) + *d_b_current)); *d_m_gradient += -(2 / (*d_N)) * d_pointX[i] * (d_pointY[i] - ((*d_m_current * d_pointX[i]) + *d_b_current)); *d_new_b = *d_b_current - (*d_learningRate * (*d_b_gradient)); *d_new_m = *d_m_current - (*d_learningRate * (*d_m_gradient)); } } void step_gradient(double *new_b, double *new_m, double *b_current, double *m_current, float *pointX, float *pointY, float learningRate, int num_points) { float b_gradient = 0.0; float m_gradient = 0.0; float N = float(num_points); float *d_pointX, *d_pointY; float *d_b_gradient, *d_m_gradient, *d_N, *d_learningRate; double *d_new_b, *d_new_m, *d_b_current, *d_m_current; //alocate hipMalloc(&d_pointX, 100 * sizeof(float)); hipMalloc(&d_pointY, 100 * sizeof(float)); hipMalloc(&d_b_gradient, sizeof(float)); hipMalloc(&d_m_gradient, sizeof(float)); hipMalloc(&d_N, sizeof(float)); hipMalloc(&d_new_b, sizeof(double)); hipMalloc(&d_new_m, sizeof(double)); hipMalloc(&d_b_current, sizeof(double)); hipMalloc(&d_m_current, sizeof(double)); hipMalloc(&d_learningRate, sizeof(float)); //copy hipMemcpy(d_pointX, pointX, 100 * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_pointY, pointY, 100 * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b_gradient, &b_gradient, sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_m_gradient, &m_gradient, sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_N, &N, sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_new_b, new_b, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_new_m, new_m, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_b_current, b_current, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_m_current, m_current, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_learningRate, &learningRate, sizeof(float), hipMemcpyHostToDevice); calculate_step_gradient << <1, 100 >> > (d_b_gradient, d_m_gradient, d_N, d_new_b, d_new_m, d_b_current, d_m_current, d_pointX, d_pointY, d_learningRate, num_points); hipMemcpy(new_b, d_new_b, sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(new_m, d_new_m, sizeof(double), hipMemcpyDeviceToHost); //printf("new_b: %f, new_m: %f\n", *new_b, *new_m); } //new bais and weight update - backpropagation void gradient_descent_runner(double *b, double *m, float *pointX, float *pointY, float starting_b, float starting_m, float learning_rate, int num_iterations) { *b = starting_b; *m = starting_m; double new_b = 0; double new_m = 0; for (int i = 0; i < num_iterations; i++) { step_gradient(&new_b, &new_m, b, m, pointX, pointY, learning_rate, 100); *b = new_b; *m = new_m; //printf("d_b_current: %f, d_m_current: %f\n", new_b, new_m); } } //main event int main() { //check for time interval clock_t begin, end; //start time begin = clock(); float f1, f2; float pointX[100], pointY[100]; FILE *fp; // read CVS fp = fopen("C:/Users/user/Desktop/data.csv", "r"); int i = 0; while (fscanf(fp, "%g,%g\n", &f1, &f2) == 2) { pointX[i] = f1; pointY[i] = f2; //printf("%g, %g\n", f1, f2); i++; } float learning_rate = 0.0001; double initial_b = 0; double initial_m = 0; int num_iterations = 1000000; float error = 0; double b = 0.0; double m = 0.0; //calculate first total error error = 
compute_error_for_line_given_points(&initial_b, &initial_m, pointX, pointY, 100); printf("Starting gradient descent at b = %f, m = %f, error = %f\n", initial_b, initial_m, error); printf("Running...\n"); //calculation and update weight and bias. gradient_descent_runner(&b, &m, pointX, pointY, initial_b, initial_m, learning_rate, num_iterations); //calculate error after backpropagation error = compute_error_for_line_given_points(&b, &m, pointX, pointY, 100); printf("After %d iterations b = %f, m = %f, error = %f\n", num_iterations, b, m, error); //end time end = clock(); printf("GPU time inverval : %d msec\n",(end - begin)); return 0; }
4cce870b56d4d8f7e2ff2577943118cbe7551a6b.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include<stdio.h> #include<iostream> #include<math.h> #include<time.h> //namespace for standard lib. using namespace std; //Calculate error value at each position __global__ void calculate_error_points(float *totalError, double *b, double *m, float *pointX, float *pointY, int num_points) { int i = threadIdx.x; for (i; i<num_points; i++) { totalError[i] = powf((pointY[i] - (*m * pointX[i] + *b)), 2.0); } } //calculate for distance between line and points float compute_error_for_line_given_points(double *b, double *m, float *pointX, float *pointY, int num_points) { float pointError[100] = { 0.0, }; float totalError = 0.0; float ret = 0.0; float *d_pointX, *d_pointY, *d_pointError; double *d_b, *d_m; //alocate GPU memory cudaMalloc(&d_pointX, 100 * sizeof(float)); cudaMalloc(&d_pointY, 100 * sizeof(float)); cudaMalloc(&d_pointError, 100 * sizeof(float)); cudaMalloc(&d_b, sizeof(double)); cudaMalloc(&d_m, sizeof(double)); //copy CPU data to GPU memory cudaMemcpy(d_pointX, pointX, 100 * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_pointY, pointY, 100 * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_pointError, pointError, 100 * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_m, m, sizeof(double), cudaMemcpyHostToDevice); //caluation error between line and points calculate_error_points << <1, 100 >> > (d_pointError, d_b, d_m, d_pointX, d_pointY, num_points); //getback data from GPU to CPU cudaMemcpy(pointError, d_pointError, 100 * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < 100; i++) { totalError += pointError[i]; } ret = (totalError / float(num_points)); //free memory cudaFree(d_pointX); cudaFree(d_pointY); cudaFree(d_pointError); cudaFree(d_b); cudaFree(d_m); return ret; } //calculation gradient for updating weight - for backpropagation. 
__global__ void calculate_step_gradient(float *d_b_gradient, float *d_m_gradient, float *d_N, double *d_new_b, double *d_new_m, double *d_b_current, double *d_m_current, float *d_pointX, float *d_pointY, float *d_learningRate, int num_points) { int i = threadIdx.x; for (int i; i < num_points; i++) { *d_b_gradient += -(2 / (*d_N)) * (d_pointY[i] - ((*d_m_current * d_pointX[i]) + *d_b_current)); *d_m_gradient += -(2 / (*d_N)) * d_pointX[i] * (d_pointY[i] - ((*d_m_current * d_pointX[i]) + *d_b_current)); *d_new_b = *d_b_current - (*d_learningRate * (*d_b_gradient)); *d_new_m = *d_m_current - (*d_learningRate * (*d_m_gradient)); } } void step_gradient(double *new_b, double *new_m, double *b_current, double *m_current, float *pointX, float *pointY, float learningRate, int num_points) { float b_gradient = 0.0; float m_gradient = 0.0; float N = float(num_points); float *d_pointX, *d_pointY; float *d_b_gradient, *d_m_gradient, *d_N, *d_learningRate; double *d_new_b, *d_new_m, *d_b_current, *d_m_current; //alocate cudaMalloc(&d_pointX, 100 * sizeof(float)); cudaMalloc(&d_pointY, 100 * sizeof(float)); cudaMalloc(&d_b_gradient, sizeof(float)); cudaMalloc(&d_m_gradient, sizeof(float)); cudaMalloc(&d_N, sizeof(float)); cudaMalloc(&d_new_b, sizeof(double)); cudaMalloc(&d_new_m, sizeof(double)); cudaMalloc(&d_b_current, sizeof(double)); cudaMalloc(&d_m_current, sizeof(double)); cudaMalloc(&d_learningRate, sizeof(float)); //copy cudaMemcpy(d_pointX, pointX, 100 * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_pointY, pointY, 100 * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b_gradient, &b_gradient, sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_m_gradient, &m_gradient, sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_N, &N, sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_new_b, new_b, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_new_m, new_m, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_b_current, b_current, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_m_current, m_current, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_learningRate, &learningRate, sizeof(float), cudaMemcpyHostToDevice); calculate_step_gradient << <1, 100 >> > (d_b_gradient, d_m_gradient, d_N, d_new_b, d_new_m, d_b_current, d_m_current, d_pointX, d_pointY, d_learningRate, num_points); cudaMemcpy(new_b, d_new_b, sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(new_m, d_new_m, sizeof(double), cudaMemcpyDeviceToHost); //printf("new_b: %f, new_m: %f\n", *new_b, *new_m); } //new bais and weight update - backpropagation void gradient_descent_runner(double *b, double *m, float *pointX, float *pointY, float starting_b, float starting_m, float learning_rate, int num_iterations) { *b = starting_b; *m = starting_m; double new_b = 0; double new_m = 0; for (int i = 0; i < num_iterations; i++) { step_gradient(&new_b, &new_m, b, m, pointX, pointY, learning_rate, 100); *b = new_b; *m = new_m; //printf("d_b_current: %f, d_m_current: %f\n", new_b, new_m); } } //main event int main() { //check for time interval clock_t begin, end; //start time begin = clock(); float f1, f2; float pointX[100], pointY[100]; FILE *fp; // read CVS fp = fopen("C:/Users/user/Desktop/data.csv", "r"); int i = 0; while (fscanf(fp, "%g,%g\n", &f1, &f2) == 2) { pointX[i] = f1; pointY[i] = f2; //printf("%g, %g\n", f1, f2); i++; } float learning_rate = 0.0001; double initial_b = 0; double initial_m = 0; int num_iterations = 1000000; float error = 0; double b = 0.0; double m = 0.0; //calculate first total error error = 
compute_error_for_line_given_points(&initial_b, &initial_m, pointX, pointY, 100); printf("Starting gradient descent at b = %f, m = %f, error = %f\n", initial_b, initial_m, error); printf("Running...\n"); //calculation and update weight and bias. gradient_descent_runner(&b, &m, pointX, pointY, initial_b, initial_m, learning_rate, num_iterations); //calculate error after backpropagation error = compute_error_for_line_given_points(&b, &m, pointX, pointY, 100); printf("After %d iterations b = %f, m = %f, error = %f\n", num_iterations, b, m, error); //end time end = clock(); printf("GPU time inverval : %d msec\n",(end - begin)); return 0; }
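Two details in calculate_step_gradient above are worth flagging: the inner loop re-declares its counter without initialising it ("for (int i; i < num_points; i++)", which is undefined behavior and shadows the outer i), and all 100 threads accumulate into the same *d_b_gradient / *d_m_gradient and write *d_new_b / *d_new_m concurrently without atomics. A corrected single-block sketch, with one thread per data point and shared-memory accumulation, could look like the following; the names, types and launch shape are illustrative, not the project's code.

__global__ void step_gradient_sketch(const float* x, const float* y,
                                     const double* b_cur, const double* m_cur,
                                     double* b_new, double* m_new,
                                     float lr, int n)
{
    __shared__ float sb, sm;               // gradient accumulators for the block
    int i = threadIdx.x;                   // launched as <<<1, n>>> with n <= 1024

    if (i == 0) { sb = 0.0f; sm = 0.0f; }
    __syncthreads();

    if (i < n) {
        float err = y[i] - (float)(*m_cur * x[i] + *b_cur);
        atomicAdd(&sb, -2.0f / n * err);          // d(MSE)/db contribution of point i
        atomicAdd(&sm, -2.0f / n * x[i] * err);   // d(MSE)/dm contribution of point i
    }
    __syncthreads();

    if (i == 0) {                           // a single thread applies the update
        *b_new = *b_cur - lr * sb;
        *m_new = *m_cur - lr * sm;
    }
}

// launch: step_gradient_sketch<<<1, n>>>(d_x, d_y, d_b, d_m, d_b_new, d_m_new, lr, n);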
cd08115a1aacfb5a46c662c77b6c42125502cdff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * JCuda - Java bindings for NVIDIA CUDA driver and runtime API * http://www.jcuda.org * * * This code is based on the NVIDIA 'reduction' CUDA sample, * Copyright 1993-2010 NVIDIA Corporation. */ #include <math.h> extern "C" __global__ void reduce(double *g_idata, double *g_odata, unsigned int n) { extern __shared__ double sdata[]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x*2 + threadIdx.x; unsigned int gridSize = blockDim.x*2*gridDim.x; double mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds if (i + blockDim.x < n) mySum += g_idata[i+blockDim.x]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile double* smem = sdata; if (blockDim.x >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockDim.x >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockDim.x >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockDim.x >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockDim.x >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockDim.x >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; }
cd08115a1aacfb5a46c662c77b6c42125502cdff.cu
/* * JCuda - Java bindings for NVIDIA CUDA driver and runtime API * http://www.jcuda.org * * * This code is based on the NVIDIA 'reduction' CUDA sample, * Copyright 1993-2010 NVIDIA Corporation. */ #include <math.h> extern "C" __global__ void reduce(double *g_idata, double *g_odata, unsigned int n) { extern __shared__ double sdata[]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x*2 + threadIdx.x; unsigned int gridSize = blockDim.x*2*gridDim.x; double mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds if (i + blockDim.x < n) mySum += g_idata[i+blockDim.x]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile double* smem = sdata; if (blockDim.x >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockDim.x >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockDim.x >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockDim.x >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockDim.x >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockDim.x >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; }
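The reduce kernel above declares "extern __shared__ double sdata[]", so its shared-memory size must be supplied as the third launch parameter, and each thread folds at least two input elements per grid stride. A minimal two-pass host sketch follows; the thread count is an assumption, and the forward declaration assumes the two translation units are linked with relocatable device code (e.g. nvcc -rdc=true).

#include <cuda_runtime.h>

extern "C" __global__ void reduce(double*, double*, unsigned int);  // defined in the file above

void reduce_two_pass(double* d_in, double* d_partial, double* d_sum, unsigned int n)
{
    const int    threads = 256;                                       // power of two, >= 64, as the kernel expects
    const int    blocks  = (int)((n + threads * 2 - 1) / (threads * 2)); // each thread covers two elements per stride
    const size_t smem    = threads * sizeof(double);                  // size of extern __shared__ sdata[]

    reduce<<<blocks, threads, smem>>>(d_in, d_partial, n);                       // pass 1: one partial sum per block
    reduce<<<1, threads, smem>>>(d_partial, d_sum, (unsigned int)blocks);        // pass 2: fold partials; result in d_sum[0]
}

Note that the warp-synchronous tail of the kernel relies on threads of a warp executing in lock step; on Volta and newer GPUs, NVIDIA's guidance is to add __syncwarp() or use warp-shuffle reductions for this part.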
f54e6d3bc90f0fcb255394892bad7f30cb7e0cb0.hip
// !!! This is a file automatically generated by hipify!!! #include <Eigen\Dense> //#include "C:\Users\angelo\Documents\Project\ThirdPartyLibraries\Eigen\Eigen\Dense" #include <limits> #include <vector> //#include <boost/math/special_functions.hpp> #include "Polynomials.cuh" //#include "debug.h" #include <fstream> #include <iomanip> #include <math.h> #include "setAntennas.h" typedef unsigned int uint; /************************************/ /* MATRIX PSEUDOINVERSE CALCULATION */ /************************************/ template<typename DerivedA> Eigen::Matrix< typename DerivedA::Scalar, DerivedA::ColsAtCompileTime, DerivedA::RowsAtCompileTime> PseudoInverse(const Eigen::MatrixBase<DerivedA> & a, double epsilon = std::numeric_limits<typename DerivedA::Scalar>::epsilon()) { assert(a.rows()>=a.cols()); typedef Eigen::Matrix<typename DerivedA::Scalar, DerivedA::RowsAtCompileTime, DerivedA::ColsAtCompileTime> InputType; typedef Eigen::Matrix<typename DerivedA::Scalar, DerivedA::ColsAtCompileTime, DerivedA::RowsAtCompileTime> ReturnType; Eigen::JacobiSVD<InputType> svd = a.jacobiSvd(Eigen::ComputeFullU |Eigen::ComputeFullV); double tolerance = epsilon * ::max(a.cols(),a.rows()) * svd.singularValues().array().abs().maxCoeff(); ReturnType sigma = ReturnType::Zero(a.cols(), a.rows()); sigma.block(0, 0, a.cols(), a.cols()) = (svd.singularValues().array().abs()>tolerance). select(svd.singularValues().array().inverse(), 0).matrix().asDiagonal(); return svd.matrixV() * sigma * svd.matrixU().adjoint(); } /***************************************************/ /* FROM ELEMENT POSITIONS TO LEGENDRE COEFFICIENTS */ /***************************************************/ // --- Projects the tx-rx antenna positions on Legendre polynomials void project_on_curve(const std::vector<double> &spos_, std::vector<double> &coeff_, std::vector<double> &LegendreCoeff_, double &step_, std::vector<double> &xsi_){ const uint Npos = spos_.size(); const uint Ncoeff = coeff_.size(); Eigen::MatrixXd spos(Npos,1); Eigen::MatrixXd coeff(Ncoeff,1); for(uint i = 0; i < Npos; i++) spos(i) = spos_[i]; Eigen::MatrixXd LegendreCoeff(Npos, Ncoeff); // --- Legendre matrix coefficients Eigen::VectorXd xsi(Npos); xsi.setLinSpaced(-1, 1); // --- Sampling grid of the Legendre coefficients for(uint j = 0; j < Npos; j++) xsi_[j] = xsi(j); step_ = fabs(xsi(1) - xsi(0)); // --- Computes the Legendre coefficients matrix in row-major order for(uint r = 0; r < Npos; r++ ) { for(uint c = 0; c < Ncoeff; c++ ) { //printf("LegendreCoeff(r,c) = boost::math::legendre_p(c + 1, xsi(r));\n"); //printf("LegendreCoeff(r,c) = %f\n", LegendreCoeff(r,c)); ////printf("boost::math::legendre_p(c + 1, xsi(r)) %f %f\n", boost::math::legendre_p(c + 1, xsi(r)), LegendreN(c + 1, xsi(r))); //printf("boost::math::legendre_p(c + 1, xsi(r)) %f\n", LegendreN(c + 1, xsi(r))); // //LegendreCoeff(r,c) = boost::math::legendre_p(c + 1, xsi(r)); LegendreCoeff(r,c) = LegendreN(c + 1, xsi(r)); //printf("LegendreCoeff_[r * Ncoeff + c] = LegendreCoeff(r,c); \n"); LegendreCoeff_[r * Ncoeff + c] = LegendreCoeff(r,c); } } coeff = PseudoInverse(LegendreCoeff) * spos; // --- Copy the result to output for(uint i = 0; i < Ncoeff; i++) coeff_[i] = coeff(i); }
f54e6d3bc90f0fcb255394892bad7f30cb7e0cb0.cu
#include <Eigen\Dense> //#include "C:\Users\angelo\Documents\Project\ThirdPartyLibraries\Eigen\Eigen\Dense" #include <limits> #include <vector> //#include <boost/math/special_functions.hpp> #include "Polynomials.cuh" //#include "debug.h" #include <fstream> #include <iomanip> #include <math.h> #include "setAntennas.h" typedef unsigned int uint; /************************************/ /* MATRIX PSEUDOINVERSE CALCULATION */ /************************************/ template<typename DerivedA> Eigen::Matrix< typename DerivedA::Scalar, DerivedA::ColsAtCompileTime, DerivedA::RowsAtCompileTime> PseudoInverse(const Eigen::MatrixBase<DerivedA> & a, double epsilon = std::numeric_limits<typename DerivedA::Scalar>::epsilon()) { assert(a.rows()>=a.cols()); typedef Eigen::Matrix<typename DerivedA::Scalar, DerivedA::RowsAtCompileTime, DerivedA::ColsAtCompileTime> InputType; typedef Eigen::Matrix<typename DerivedA::Scalar, DerivedA::ColsAtCompileTime, DerivedA::RowsAtCompileTime> ReturnType; Eigen::JacobiSVD<InputType> svd = a.jacobiSvd(Eigen::ComputeFullU |Eigen::ComputeFullV); double tolerance = epsilon * std::max(a.cols(),a.rows()) * svd.singularValues().array().abs().maxCoeff(); ReturnType sigma = ReturnType::Zero(a.cols(), a.rows()); sigma.block(0, 0, a.cols(), a.cols()) = (svd.singularValues().array().abs()>tolerance). select(svd.singularValues().array().inverse(), 0).matrix().asDiagonal(); return svd.matrixV() * sigma * svd.matrixU().adjoint(); } /***************************************************/ /* FROM ELEMENT POSITIONS TO LEGENDRE COEFFICIENTS */ /***************************************************/ // --- Projects the tx-rx antenna positions on Legendre polynomials void project_on_curve(const std::vector<double> &spos_, std::vector<double> &coeff_, std::vector<double> &LegendreCoeff_, double &step_, std::vector<double> &xsi_){ const uint Npos = spos_.size(); const uint Ncoeff = coeff_.size(); Eigen::MatrixXd spos(Npos,1); Eigen::MatrixXd coeff(Ncoeff,1); for(uint i = 0; i < Npos; i++) spos(i) = spos_[i]; Eigen::MatrixXd LegendreCoeff(Npos, Ncoeff); // --- Legendre matrix coefficients Eigen::VectorXd xsi(Npos); xsi.setLinSpaced(-1, 1); // --- Sampling grid of the Legendre coefficients for(uint j = 0; j < Npos; j++) xsi_[j] = xsi(j); step_ = fabs(xsi(1) - xsi(0)); // --- Computes the Legendre coefficients matrix in row-major order for(uint r = 0; r < Npos; r++ ) { for(uint c = 0; c < Ncoeff; c++ ) { //printf("LegendreCoeff(r,c) = boost::math::legendre_p(c + 1, xsi(r));\n"); //printf("LegendreCoeff(r,c) = %f\n", LegendreCoeff(r,c)); ////printf("boost::math::legendre_p(c + 1, xsi(r)) %f %f\n", boost::math::legendre_p(c + 1, xsi(r)), LegendreN(c + 1, xsi(r))); //printf("boost::math::legendre_p(c + 1, xsi(r)) %f\n", LegendreN(c + 1, xsi(r))); // //LegendreCoeff(r,c) = boost::math::legendre_p(c + 1, xsi(r)); LegendreCoeff(r,c) = LegendreN(c + 1, xsi(r)); //printf("LegendreCoeff_[r * Ncoeff + c] = LegendreCoeff(r,c); \n"); LegendreCoeff_[r * Ncoeff + c] = LegendreCoeff(r,c); } } coeff = PseudoInverse(LegendreCoeff) * spos; // --- Copy the result to output for(uint i = 0; i < Ncoeff; i++) coeff_[i] = coeff(i); }
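project_on_curve above fills the Legendre matrix by calling LegendreN(c + 1, xsi(r)) from Polynomials.cuh, which is not shown here. For reference, P_n(x) can be evaluated with Bonnet's recurrence (n+1)P_{n+1}(x) = (2n+1)x P_n(x) - n P_{n-1}(x); the stand-alone sketch below implements that recurrence and is not the project's implementation.

__host__ __device__ inline double legendre_sketch(int n, double x)
{
    if (n == 0) return 1.0;
    if (n == 1) return x;
    double pm1 = 1.0, p = x;                 // P_0 and P_1
    for (int k = 1; k < n; ++k) {
        double pp1 = ((2.0 * k + 1.0) * x * p - k * pm1) / (k + 1.0);  // Bonnet step
        pm1 = p;
        p   = pp1;
    }
    return p;                                // P_n(x)
}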
eae65b92b365ebe6a3828f4c4467b15d1c7f305c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <rocblas.h> #include "mp1-util.h" #include <vector> extern "C"{ #include "trajectory.h" } #define MAXBINS 500000 #define BINSIZE128 64 #define BINSIZE256 256 #ifndef PI #define PI 3.141592653f #endif #define DBG_DCF 1 #define EPS 2.2204e-16 enum ConvType {ctCONST=0, ctSEP}; const char savePath[] = "/home_local/noaddy/research/data/code/dcfjcu/"; enum CompartmentSize { COMPARTMENT_64=64, COMPARTMENT_128=128 }; Timer timer; FILE* file; char filename[128]; /** \param[in] k Distance from k-space origin \param[in] F k-space scaling factor */ __device__ float P2D(float k, float F) { float val = 1.0f; if(k>1.0e-6) { val = F*j1f(F*PI*k)/(2.0f*k)/(PI*F*F/4.0f); val *= val; } return val; } /** \param[in] k Distance from k-space origin \param[in] F k-space scaling factor */ __device__ float P3D(float k, float F) { float val; // float s, c; float pfk; /*if(k<1e-6) val = 1.0f; else {*/ pfk = PI*F*k; // s = sinf(pfk); // c = cosf(pfk); // val = (s-pfk*c)/(2.0f*PI*PI*k*k*k) * (s-pfk*c)/(2.0f*PI*PI*k*k*k)/Pmax/Pmax; val = 3.0f*(sinf(pfk) - pfk*cosf(pfk))/(pfk*pfk*pfk); val *= val; //} return val; } __device__ float P3D1(float k, float F) { return 9.0f*(sinf(PI*F*k) - PI*F*k*cosf(PI*F*k))*(sinf(PI*F*k) - PI*F*k*cosf(PI*F*k))/(PI*F*k*PI*F*k*PI*F*k * PI*F*k*PI*F*k*PI*F*k); } __device__ float P1D(float k, float F) { float pfk = PI*F*k; float val; // Avoid NaN errors if(k>EPS) val = sinf(pfk)/pfk; else val = 0.0f; return val*val; } /** \param[in] connections \param[in] ndim Number of dimensions */ __global__ void conv_kernel(unsigned int *connections, int numConnections, int *bins, float *ks, int ndim, float kernelRad, float F, float* Win, float *Wout) { unsigned int index = blockIdx.x + gridDim.x*blockIdx.y; unsigned int tx = threadIdx.x; // unsigned int ty = threadIdx.y; unsigned int b1, b2; float3 k1, k2; float d, d2; float P; int n1, n2; int i2; if(index>=numConnections) return; b1 = connections[2*index]; b2 = connections[2*index+1]; n1 = bins[BINSIZE128*b1+tx]; if(n1<0) return; if(b1==b2) { k1.x = ks[ndim*n1]; k1.y = ks[ndim*n1+1]; if(ndim>2) k1.z = ks[ndim*n1+2]; else k1.z = 0.0f; for(i2=tx; i2<BINSIZE128; i2++) { n2 = bins[BINSIZE128*b2+i2]; if(n2>=0) { k2.x = ks[ndim*n2]; k2.y = ks[ndim*n2+1]; if(ndim>2) k2.z = ks[ndim*n2+2]; else k2.z = 0.0f; if(n1==n2) { // if(Wout[n1]>0) // if(n1==0) // printf("Wout %.0f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d\n", Wout[n1], b1, b2, tx, n1, n2, index); atomicAdd(&Wout[n1], Win[n1]); // atomicAdd(&(Wout[n1]), 1); // Wout[n1] = Win[n1]; } else { // if(n1==60732 || n2==60732) // printf("Wout %.0f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d, k1 (%f,%f,%f) k2 (%f,%f,%f)\n", Wout[n1], b1, b2, tx, n1, n2, index, k1.x, k1.y, k1.z, k2.x, k2.y, k2.z); d2 = (k2.x-k1.x)*(k2.x-k1.x) + (k2.y-k1.y)*(k2.y-k1.y) + (k2.z-k1.z)*(k2.z-k1.z); if(d2<=(kernelRad*kernelRad)) { d = sqrtf(d2); // P = P2D(d, F); P = P3D(d, F); // P = 0.0f; atomicAdd(&Wout[n1], P*Win[n2]); atomicAdd(&Wout[n2], P*Win[n1]); /*atomicAdd(&(Wout[n1]), 1); atomicAdd(&(Wout[n2]), 1);*/ } } } else break; } } else { k1.x = ks[ndim*n1]; k1.y = ks[ndim*n1+1]; if(ndim>2) k1.z = ks[ndim*n1+2]; else k1.z = 0.0f; for(i2=0; i2<BINSIZE128; i2++) { n2 = bins[BINSIZE128*b2+i2]; if(n2>=0) { k2.x = ks[ndim*n2]; k2.y = ks[ndim*n2+1]; if(ndim>2) k2.z = ks[ndim*n2+2]; else k2.z = 0.0f; d2 = (k2.x-k1.x)*(k2.x-k1.x) + (k2.y-k1.y)*(k2.y-k1.y) + (k2.z-k1.z)*(k2.z-k1.z); if(d2<=(kernelRad*kernelRad)) { d = sqrtf(d2); // 
P = P2D(d, F); P = P3D(d, F); // P = 0.0f; atomicAdd(&(Wout[n1]), P*Win[n2]); atomicAdd(&(Wout[n2]), P*Win[n1]); /*atomicAdd(&(Wout[n1]), 1); atomicAdd(&(Wout[n2]), 1);*/ // if(P>1.0f) // if(kernelRad-1e-4<d) // printf("Wout %.2f, P %f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d\n", Wout[n1], P, b1, b2, tx, n1, n2, index); } } else break; } } return; } __global__ void conv_kernel2(unsigned int *connections, int numConnections, int *bins, float *ks, int ndim, float kernelRad, float F, float* Win, float *Wout) { unsigned int index = blockIdx.x + gridDim.x*blockIdx.y; unsigned int tx = threadIdx.x; unsigned int b1, b2; float d, d2; float P; int i2; if(index>=numConnections) return; b1 = connections[2*index]; b2 = connections[2*index+1]; float3 k1; int n1; __shared__ float3 k2[BINSIZE128]; __shared__ int n2[BINSIZE128]; n1 = bins[BINSIZE128*b1+tx]; if(n1<0) return; k1.x = ks[ndim*n1]; k1.y = ks[ndim*n1+1]; if(ndim>2) k1.z = ks[ndim*n1+2]; else k1.z = 0.0f; n2[tx] = bins[BINSIZE128*b2+tx]; if(n2>=0) { k2[tx].x = ks[ndim*n2[tx]]; k2[tx].y = ks[ndim*n2[tx]+1]; if(ndim>2) k2[tx].z = ks[ndim*n2[tx]+2]; else k2[tx].z = 0.0f; } __syncthreads(); if(b1==b2) { for(i2=tx; i2<BINSIZE128; i2++) { if(n2[i2]>=0) { if(n1==n2[i2]) { // if(Wout[n1]>0) // if(n1==0) // printf("Wout %.0f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d\n", Wout[n1], b1, b2, tx, n1, n2, index); atomicAdd(&Wout[n1], Win[n1]); // atomicAdd(&(Wout[n1]), 1); // Wout[n1] = Win[n1]; } else { // if(n1==60732 || n2==60732) // printf("Wout %.0f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d, k1 (%f,%f,%f) k2 (%f,%f,%f)\n", Wout[n1], b1, b2, tx, n1, n2, index, k1.x, k1.y, k1.z, k2.x, k2.y, k2.z); d2 = (k2[i2].x-k1.x)*(k2[i2].x-k1.x) + (k2[i2].y-k1.y)*(k2[i2].y-k1.y) + (k2[i2].z-k1.z)*(k2[i2].z-k1.z); if(d2<=(kernelRad*kernelRad)) { d = sqrtf(d2); P = P2D(d, F); atomicAdd(&Wout[n1], P*Win[n2[i2]]); atomicAdd(&Wout[n2[i2]], P*Win[n1]); /*atomicAdd(&(Wout[n1]), 1); atomicAdd(&(Wout[n2]), 1);*/ } } } else break; } } else { for(i2=0; i2<BINSIZE128; i2++) { if(n2[i2]>=0) { d2 = (k2[i2].x-k1.x)*(k2[i2].x-k1.x) + (k2[i2].y-k1.y)*(k2[i2].y-k1.y) + (k2[i2].z-k1.z)*(k2[i2].z-k1.z); if(d2<=(kernelRad*kernelRad)) { d = sqrtf(d2); P = P2D(d, F); atomicAdd(&(Wout[n1]), P*Win[n2[i2]]); atomicAdd(&(Wout[n2[i2]]), P*Win[n1]); /*atomicAdd(&(Wout[n1]), 1); atomicAdd(&(Wout[n2]), 1);*/ // if(P>1.0f) // if(kernelRad-1e-4<d) // printf("Wout %.2f, P %f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d\n", Wout[n1], P, b1, b2, tx, n1, n2, index); } } else break; } } return; } /** Using template */ template <int NDIM, int BINSIZE> __global__ void conv_kernelt(unsigned int *connections, int numConnections, int *bins, float *ks, float kernelRad, float F, float* Win, float *Wout) { unsigned int index = blockIdx.x + gridDim.x*blockIdx.y; unsigned int tx = threadIdx.x; unsigned int b1, b2; float3 k1, k2; float d, d2; float P; int n1, n2; int i2; if(index>=numConnections) return; b1 = connections[2*index]; b2 = connections[2*index+1]; n1 = bins[BINSIZE*b1+tx]; if(n1<0) return; if(b1==b2) { // Processing matches in the same bin k1.x = ks[NDIM*n1]; k1.y = ks[NDIM*n1+1]; if(NDIM>2) k1.z = ks[NDIM*n1+2]; else k1.z = 0.0f; for(i2=tx; i2<BINSIZE; i2++) { n2 = bins[BINSIZE*b2+i2]; if(n2>=0) { k2.x = ks[NDIM*n2]; k2.y = ks[NDIM*n2+1]; if(NDIM>2) k2.z = ks[NDIM*n2+2]; else k2.z = 0.0f; if(n1==n2) { // if(Wout[n1]>0) // if(n1==0) // printf("Wout %.0f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d\n", Wout[n1], b1, b2, tx, n1, n2, index); atomicAdd(&Wout[n1], Win[n1]); // atomicAdd(&(Wout[n1]), 
1); // Wout[n1] = Win[n1]; } else { // if(n1==60732 || n2==60732) // printf("Wout %.0f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d, k1 (%f,%f,%f) k2 (%f,%f,%f)\n", Wout[n1], b1, b2, tx, n1, n2, index, k1.x, k1.y, k1.z, k2.x, k2.y, k2.z); if(NDIM==3) d2 = (k2.x-k1.x)*(k2.x-k1.x) + (k2.y-k1.y)*(k2.y-k1.y) + (k2.z-k1.z)*(k2.z-k1.z); else d2 = (k2.x-k1.x)*(k2.x-k1.x) + (k2.y-k1.y)*(k2.y-k1.y); if(d2<=(kernelRad*kernelRad)) { d = sqrtf(d2); if(NDIM==3) P = P3D(d, F); else P = P2D(d, F); // P = 0.0f; atomicAdd(&Wout[n1], P*Win[n2]); atomicAdd(&Wout[n2], P*Win[n1]); /*atomicAdd(&(Wout[n1]), 1); atomicAdd(&(Wout[n2]), 1);*/ } } } else break; } } else { k1.x = ks[NDIM*n1]; k1.y = ks[NDIM*n1+1]; if(NDIM>2) k1.z = ks[NDIM*n1+2]; else k1.z = 0.0f; for(i2=0; i2<BINSIZE; i2++) { n2 = bins[BINSIZE*b2+i2]; if(n2>=0) { k2.x = ks[NDIM*n2]; k2.y = ks[NDIM*n2+1]; if(NDIM>2) k2.z = ks[NDIM*n2+2]; else k2.z = 0.0f; d2 = (k2.x-k1.x)*(k2.x-k1.x) + (k2.y-k1.y)*(k2.y-k1.y); if(NDIM==3) d2 += (k2.z-k1.z)*(k2.z-k1.z); if(d2<=(kernelRad*kernelRad)) { d = sqrtf(d2); if(NDIM==2) P = P2D(d, F); else P = P3D(d, F); // P = 0.0f; atomicAdd(&(Wout[n1]), P*Win[n2]); atomicAdd(&(Wout[n2]), P*Win[n1]); /*atomicAdd(&(Wout[n1]), 1); atomicAdd(&(Wout[n2]), 1);*/ // if(P>1.0f) // if(kernelRad-1e-4<d) // printf("Wout %.2f, P %f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d\n", Wout[n1], P, b1, b2, tx, n1, n2, index); } } else break; } } //atomicAdd(pct, 1.0f/numConnections); //printf("%.2f\t", *pct); return; } /** Separable kernel */ template <int NDIM, int BINSIZE> __global__ void conv_kernel_sep(unsigned int *connections, int numConnections, int *bins, float *ks, float* kernelRad, float *F, float* Win, float *Wout) { unsigned int index = blockIdx.x + gridDim.x*blockIdx.y; unsigned int tx = threadIdx.x; unsigned int b1, b2; float3 k1, k2; float3 d; float P; int n1, n2; int i2; if(index>=numConnections) return; b1 = connections[2*index]; b2 = connections[2*index+1]; n1 = bins[BINSIZE*b1+tx]; if(n1<0) return; if(b1==b2) { k1.x = ks[NDIM*n1]; k1.y = ks[NDIM*n1+1]; if(NDIM>2) k1.z = ks[NDIM*n1+2]; else k1.z = 0.0f; for(i2=tx; i2<BINSIZE; i2++) { n2 = bins[BINSIZE*b2+i2]; if(n2>=0) { k2.x = ks[NDIM*n2]; k2.y = ks[NDIM*n2+1]; if(NDIM>2) k2.z = ks[NDIM*n2+2]; else k2.z = 0.0f; if(n1==n2) { // if(Wout[n1]>0) // if(n1==0) // printf("Wout %.0f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d\n", Wout[n1], b1, b2, tx, n1, n2, index); atomicAdd(&Wout[n1], Win[n1]); // atomicAdd(&(Wout[n1]), 1); // Wout[n1] = Win[n1]; } else { // if(n1==60732 || n2==60732) // printf("Wout %.0f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d, k1 (%f,%f,%f) k2 (%f,%f,%f)\n", Wout[n1], b1, b2, tx, n1, n2, index, k1.x, k1.y, k1.z, k2.x, k2.y, k2.z); d.x = fabs(k2.x-k1.x); d.y = fabs(k2.y-k1.y); d.z = fabs(k2.z-k1.z); if(NDIM==2) { if((d.x<kernelRad[0]) && (d.y<kernelRad[1])) { P = P1D(d.x, F[0]) * P1D(d.y, F[1]); atomicAdd(&Wout[n1], P*Win[n2]); atomicAdd(&Wout[n2], P*Win[n1]); } } else { if((d.x<kernelRad[0]) && (d.y<kernelRad[1]) && (d.z<kernelRad[2])) { P = P1D(d.x, F[0]) * P1D(d.y, F[1]) * P1D(d.z, F[2]); atomicAdd(&Wout[n1], P*Win[n2]); atomicAdd(&Wout[n2], P*Win[n1]); } } /*atomicAdd(&(Wout[n1]), 1); atomicAdd(&(Wout[n2]), 1);*/ } } else break; } } else { k1.x = ks[NDIM*n1]; k1.y = ks[NDIM*n1+1]; if(NDIM>2) k1.z = ks[NDIM*n1+2]; else k1.z = 0.0f; for(i2=0; i2<BINSIZE; i2++) { n2 = bins[BINSIZE*b2+i2]; if(n2>=0) { k2.x = ks[NDIM*n2]; k2.y = ks[NDIM*n2+1]; if(NDIM>2) k2.z = ks[NDIM*n2+2]; else k2.z = 0.0f; d.x = fabs(k2.x-k1.x); d.y = fabs(k2.y-k1.y); d.z = 
fabs(k2.z-k1.z); if(NDIM==2) { if((d.x<kernelRad[0]) && (d.y<kernelRad[1])) { P = P1D(d.x, F[0]) * P1D(d.y, F[1]); atomicAdd(&Wout[n1], P*Win[n2]); atomicAdd(&Wout[n2], P*Win[n1]); } } else { if((d.x<kernelRad[0]) && (d.y<kernelRad[1]) && (d.z<kernelRad[2])) { P = P1D(d.x, F[0]) * P1D(d.y, F[1]) * P1D(d.z, F[2]); atomicAdd(&Wout[n1], P*Win[n2]); atomicAdd(&Wout[n2], P*Win[n1]); } } /*atomicAdd(&(Wout[n1]), 1); atomicAdd(&(Wout[n2]), 1);*/ // if(P>1.0f) // if(kernelRad-1e-4<d) // printf("Wout %.2f, P %f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d\n", Wout[n1], P, b1, b2, tx, n1, n2, index); } else break; } } return; } __global__ void divideArray(float* num, float* den, int npts) { unsigned int index = threadIdx.x + blockDim.x*blockIdx.x; if(index>=npts) return; if(fabsf(den[index])>0.0f) num[index] /= den[index]; else num[index] = 0.0f; return; } int findDataSize(int target, int max1, int max2, unsigned int *dim1, unsigned int *dim2) { int success= 0; int rem; int minRem=0; int d1, d2; if(target<max1) { *dim1 = target; *dim2 = 1; } else { d1 = max1; while(d1>0) { d2 = ceil(target/(1.0*d1)); rem = d1*d2 - target; if(d2>max2) d1 = 0; else if(rem<=minRem) { *dim1 = d1; *dim2 = d2; success = 1; } d1--; } } return success; } /** \param[in] ndim Number of dimensions \param[in] FOV Field of View (cm) */ void jdcfcu(int npts, float *k, int ndim, int *nInclude, float *FOV, float *voxDim, ConvType convType, int numLobes, int *numCt, int compartmentSize, int numIter, float *dcf) { unsigned int *binIdx; unsigned int *binCount1; int *bins, *bins_dev; unsigned int *binStartIdx; unsigned int *binCurrentIdx; unsigned int *binReps; unsigned int *binConnections, *binConnections_dev; int numConnections; int numBins1 = 1; int numBins2 = 0; int bsi, bci; int bi, bii; int freeBinIdx; int c[3]; int nptsActual; float pctBinFull; float kmax[3]; float F[3]; float *F_dev; float kernelRad[3]; float *kernelRad_dev; float kernelRadMax; int maxCtOffset[3] = {1,1,1}; float *Win_dev; float *Wout_dev; float *k_dev; dim3 grid_size, block_size; dim3 grid_size_div, block_size_div; int dim; int m, n, p, q; int mm, nn, pp, qq; int nnStart, ppStart, qqStart; printf("\n%d iterations\n", numIter); printf("%d lobes\n", numLobes); printf("Compartment size %d\n", compartmentSize); //Setup kernels for(n=0; n<ndim; n++) { kmax[n] = 5/voxDim[n]; F[n] = FOV[n]*2*kmax[n]; /* parameter for kernel width */ } switch(convType) { case ctCONST: if(ndim==2) *kernelRad = (numLobes*1.01+0.21)/(*F); else *kernelRad = (numLobes*1.01+0.44f)/(*F); kernelRadMax = *kernelRad; break; case ctSEP: kernelRadMax = 0.0f; for(n=0; n<ndim; n++) { kernelRad[n] = numLobes/F[n]; // sinc has spacing 1 kernelRadMax = max(kernelRadMax, kernelRad[n]); } break; } // Calculate optimal bin size for(dim=0; dim<ndim; dim++) { if(numCt[dim]<0) { switch(convType) { case ctCONST: numCt[dim] = 1/kernelRad[0]-1; break; case ctSEP: numCt[dim] = 1/kernelRad[dim]-1; break; } numCt[dim] -= numCt[dim]%2; } } // Calculate total number of bins for(dim=0; dim<ndim; dim++) numBins1 *= numCt[dim]; binCount1 = (unsigned int*)malloc(numBins1*sizeof(unsigned int)); // Initialize number of points in grid to 0 // memste(binCount1, 0, numBins1*sizeof(unsigned int)); for(n=0; n<numBins1; n++) binCount1[n] = 0; for(dim=ndim; dim<3; dim++) numCt[dim] = 1; if(nInclude==NULL) { nInclude = (int*)malloc(npts*sizeof(int)); for(n=0; n<npts; n++) nInclude[n] = 1; } binIdx = (unsigned int*)malloc(npts*sizeof(unsigned int)); nptsActual = 0; for(n=0; n<npts; n++) { if(nInclude[n]) { // dcf[n] = 1.0f; 
for(dim=0; dim<ndim; dim++) c[dim] = floor(k[ndim*n+dim]*numCt[dim]+numCt[dim]/2.0); for(dim=ndim; dim<3; dim++) c[dim] = 0; /* Make sure c is in bounds */ for(dim=0; dim<ndim; dim++) { c[dim] = min(c[dim], numCt[dim]-1); c[dim] = max(c[dim], 0); } binIdx[n] = (c[2]*numCt[1]+c[1])*numCt[0] + c[0]; if(binIdx[n]>numBins1) { fprintf(stderr, "Error numbins=%d, current bin idx=%d, n= %d\n", numBins1, binIdx[n], n); return; } binCount1[binIdx[n]]++; nptsActual++; } else dcf[n] = 0.0f; } printf("Calculating for %d/%d points\n", nptsActual, npts); //Count number of required bins binStartIdx = (unsigned int*)malloc(numBins1*sizeof(unsigned int)); binReps = (unsigned int*)malloc(numBins1*sizeof(unsigned int)); for(n=0; n<numBins1; n++) { if(n==0) binStartIdx[n] = 0; else binStartIdx[n] = binStartIdx[n-1]+binReps[n-1]; binReps[n] = ceil(binCount1[n]/(float)compartmentSize); numBins2 += binReps[n]; } bins = (int*)malloc(compartmentSize *numBins2*sizeof(int)); binCurrentIdx = (unsigned int*)malloc(numBins1*sizeof(unsigned int)); memset(binCurrentIdx, 0, numBins1*sizeof(unsigned int)); for(n=0; n< compartmentSize *numBins2; n++) bins[n] = -1; for(n=0; n<npts; n++) { if(nInclude[n]) { bi = binIdx[n]; bci = binCurrentIdx[bi]; bsi = compartmentSize *binStartIdx[bi]; bins[bsi+bci++] = n; binCurrentIdx[bi] = bci; } } pctBinFull = 0; for(n=0; n< compartmentSize *numBins2; n++) if(bins[n]!=-1) pctBinFull += 1.0f/(compartmentSize *numBins2); printf("%d bins\n", numBins2); printf("%f%% of binspace is full\n", 100*pctBinFull); // Setup connections if(convType==ctCONST) { printf("Kernel radius %f\n", *kernelRad); printf("F %f\n", *F); } else { printf("Kernel radii %f %f %f\n", kernelRad[0], kernelRad[1], kernelRad[2]); printf("F %f %f %f\n", F[0], F[1], F[2]); } // Calculate maximum compartment search distance switch(convType) { case ctCONST: for(dim=0; dim<ndim; dim++) maxCtOffset[dim] = (int)(*kernelRad*numCt[dim]+1); break; case ctSEP: for(n=0; n<ndim; n++) maxCtOffset[n] = (int)(kernelRad[n]*numCt[n]+1); break; } /* if(ndim==2) binConnections = (unsigned int*)malloc(2*100000*sizeof(unsigned int)); else if(ndim==3) binConnections = (unsigned int*)malloc(2*20000000*sizeof(unsigned int));*/ for(dim=0; dim<2; dim++) { // on first pass count number of connections and allocate // on second pass assign connections if(dim==1) binConnections = (unsigned int*)malloc(2*numConnections*sizeof(unsigned int)); numConnections = 0; for(m=0; m<numCt[0]; m++) for(n=0; n<numCt[1]; n++) for(p=0; p<numCt[2]; p++) { bi = (p*numCt[1]+n)*numCt[0]+m; for(mm=m; mm<min(m+maxCtOffset[0]+1, numCt[0]); mm++) { if(mm==m) nnStart = n; else nnStart = max(0,n-maxCtOffset[1]); for(nn=nnStart; nn<min(n+maxCtOffset[1]+1, numCt[1]); nn++) { if((nn==n) && (mm==m)) ppStart = p; else ppStart = max(0, p-maxCtOffset[2]); for(pp=ppStart; pp<min(p+maxCtOffset[2]+1, numCt[2]); pp++) { bii = (pp*numCt[1]+nn)*numCt[0]+mm; for(q=0; q<binReps[bi]; q++) { if(bi==bii) qqStart = q; else qqStart = 0; for(qq=qqStart; qq<binReps[bii]; qq++) { if(dim==0) numConnections++; else { binConnections[2*numConnections] = binStartIdx[bi]+q; binConnections[2*numConnections+++1] = binStartIdx[bii]+qq; } // printf("numConnections %d\n", numConnections); } } } } } } } printf("# compartments %d %d %d\n", numCt[0], numCt[1], numCt[2]); printf("Max compartment offset %d %d %d\n", maxCtOffset[0], maxCtOffset[1], maxCtOffset[2]); printf("%d connections\n", numConnections); block_size.x = compartmentSize; // block_size.y = BINSIZE; grid_size.x = numConnections; 
if(!findDataSize(numConnections, 65536, 65536, &grid_size.x, &grid_size.y)) { // Coldn't find exact factors grid_size.x = sqrt(numConnections); grid_size.y =ceil(numConnections/(double)grid_size.x); } printf("Grid size [%d,%d,%d]\n", grid_size.x, grid_size.y, grid_size.z); printf("Block size [%d,%d,%d]\n", block_size.x, block_size.y, block_size.z); findBlockGrid3(npts, 256, &block_size_div, &grid_size_div); block_size_div.x = 256; /* grid_size_div.x = npts/block_size_div.x; grid_size_div.x++;*/ printf("Divide grid size [%d,%d,%d]\n", grid_size_div.x, grid_size_div.y, grid_size_div.z); printf("Divide block size [%d,%d,%d]\n", block_size_div.x, block_size_div.y, block_size_div.z); // Allocate gpu memory timer.start(); hipMalloc((void**)&binConnections_dev, 2*numConnections*sizeof(unsigned int)); hipMalloc((void**)&bins_dev, compartmentSize*numBins2*sizeof(int)); hipMalloc((void**)&k_dev, ndim*npts*sizeof(float)); hipMalloc((void**)&Win_dev, npts*sizeof(float)); hipMalloc((void**)&Wout_dev, npts*sizeof(float)); // Copy to gpu hipMemcpy(binConnections_dev, binConnections, 2*numConnections*sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(bins_dev, bins, compartmentSize*numBins2*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(k_dev, k, ndim*npts*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(Win_dev, dcf, npts*sizeof(float), hipMemcpyHostToDevice); timer.stop("Allocate and copy to gpu"); for(n=0; n<numIter; n++) { timer.start(); hipMemset(Wout_dev, 0, npts*sizeof(float)); // conv_kernel<<<grid_size, block_size>>>(binConnections_dev, numConnections, bins_dev, k_dev, ndim, kernelRad, F, Win_dev, Wout_dev); switch(convType) { case ctCONST: if(ndim==2) hipLaunchKernelGGL(( conv_kernelt<2, 128>), dim3(grid_size), dim3(block_size), 0, 0, binConnections_dev, numConnections, bins_dev, k_dev, *kernelRad, *F, Win_dev, Wout_dev); else switch (compartmentSize) { case COMPARTMENT_128: hipLaunchKernelGGL(( conv_kernelt<3, COMPARTMENT_128>), dim3(grid_size), dim3(block_size), 0, 0, binConnections_dev, numConnections, bins_dev, k_dev, *kernelRad, *F, Win_dev, Wout_dev); break; case COMPARTMENT_64: hipLaunchKernelGGL(( conv_kernelt<3, COMPARTMENT_64>), dim3(grid_size), dim3(block_size), 0, 0, binConnections_dev, numConnections, bins_dev, k_dev, *kernelRad, *F, Win_dev, Wout_dev); break; } break; case ctSEP: hipMalloc((void**)&kernelRad_dev, 3*sizeof(float)); hipMalloc((void**)&F_dev, 3*sizeof(float)); hipMemcpy(kernelRad_dev, kernelRad, 3*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(F_dev, F, 3*sizeof(float), hipMemcpyHostToDevice); checkLaunch("copy"); if(ndim==2) switch(compartmentSize) { case COMPARTMENT_64: hipLaunchKernelGGL(( conv_kernel_sep<2, COMPARTMENT_64>), dim3(grid_size), dim3(block_size), 0, 0, binConnections_dev, numConnections, bins_dev, k_dev, kernelRad_dev, F_dev, Win_dev, Wout_dev); break; case COMPARTMENT_128: hipLaunchKernelGGL(( conv_kernel_sep<2, COMPARTMENT_128>), dim3(grid_size), dim3(block_size), 0, 0, binConnections_dev, numConnections, bins_dev, k_dev, kernelRad_dev, F_dev, Win_dev, Wout_dev); break; } else switch(compartmentSize) { case COMPARTMENT_64: hipLaunchKernelGGL(( conv_kernel_sep<3, COMPARTMENT_64>), dim3(grid_size), dim3(block_size), 0, 0, binConnections_dev, numConnections, bins_dev, k_dev, kernelRad_dev, F_dev, Win_dev, Wout_dev); break; case COMPARTMENT_128: hipLaunchKernelGGL(( conv_kernel_sep<3, COMPARTMENT_128>), dim3(grid_size), dim3(block_size), 0, 0, binConnections_dev, numConnections, bins_dev, k_dev, kernelRad_dev, F_dev, Win_dev, Wout_dev); break; 
case 256: hipLaunchKernelGGL(( conv_kernel_sep<3, 256>), dim3(grid_size), dim3(block_size), 0, 0, binConnections_dev, numConnections, bins_dev, k_dev, kernelRad_dev, F_dev, Win_dev, Wout_dev); break; case 512: hipLaunchKernelGGL(( conv_kernel_sep<3, 512>), dim3(grid_size), dim3(block_size), 0, 0, binConnections_dev, numConnections, bins_dev, k_dev, kernelRad_dev, F_dev, Win_dev, Wout_dev); break; default: fprintf(stderr, "Unsupported bin size %d {64,128,256,512}\n", compartmentSize); abort(); } break; } checkLaunch("convolution"); hipLaunchKernelGGL(( divideArray), dim3(grid_size_div), dim3(block_size_div), 0, 0, Win_dev, Wout_dev, npts); checkLaunch("division"); timer.stop("convolution"); } hipMemcpy(dcf, Wout_dev, npts*sizeof(float), hipMemcpyDeviceToHost); sprintf(filename, "%sWout", savePath); file = fopen(filename, "wb"); if(file!=NULL) { fwrite(dcf, npts, sizeof(float), file); fclose(file); } hipMemcpy(dcf, Win_dev, npts*sizeof(float), hipMemcpyDeviceToHost); sprintf(filename, "%sbinConnections", savePath); file = fopen(filename, "wb"); if(file!=NULL) { fwrite(binConnections, 2*numConnections, sizeof(unsigned int), file); fclose(file); } sprintf(filename, "%sbins", savePath); file = fopen(filename, "wb"); if(file!=NULL) { fwrite(bins, compartmentSize*numBins2, sizeof(int), file); fclose(file); } hipFree(binConnections_dev); hipFree(bins_dev); hipFree(Wout_dev); hipFree(Win_dev); hipFree(k_dev); free(binIdx); free(binConnections); return; } #ifndef MATLAB_MEX_FILE /*! * \brief Iteratively calculate the density compensation weights for a k-space trajectory. * * \param argc The number of command line arguments (including the executable). * \param argv Parameters * 1: File path to the k-space file with data stored as kx, ky, kz, dcf (3D) or kx, ky, dcf (2D). * 2: The number of readouts in the trajectory. * 3: The number of readout points per readout. * 4: The number of dimensions of the trajectory. * 5: X field of view (cm). * 6: Y field of view (cm). * 7: Z field of view (cm). * 8: X spatial resolution (mm). * 9: Y spatial resolution (mm). * 10: Z spatial resolution (mm). * 11: Endian mode. 'b' for big endian, 'l' for little. * 12: File path to save updated trajectory. */ int main(int argc, char *argv[]) { int numLobes = 2; int numCt[] = {-1,-1,-1}; int numIter = 1; ConvType cType = ctCONST; //printTrajectoryInfo(&traj, NULL); const char* filePathInput = argv[1]; printf("Loading k-space file from %s\n", filePathInput); Endian endian = argv[11][0] == 'b' ? 
BigEndian : LittleEndian; Trajectory* trajectory = loadKSpaceFile(filePathInput, atoi(argv[2]), atoi(argv[3]), atoi(argv[4]), endian); std::vector<float> coordinates; std::vector<float> densityCompensation; for(int r=0; r<trajectory->numReadouts; r++) for(int n=0; n< trajectory->numReadoutPoints; n++) { float pointCoordinates[3]; float density; trajectoryCoordinates(n, r, trajectory, pointCoordinates, &density); for (int d = 0; d < trajectory->numDimensions; d++) coordinates.push_back(pointCoordinates[d]); densityCompensation.push_back(density); } float fieldOfView[3]; float spatialResolution[3]; const int fieldOfViewArgumentIndex = 5; for (int d = 0; d < trajectory->numDimensions; d++) { fieldOfView[d] = atoi(argv[d + fieldOfViewArgumentIndex]); spatialResolution[d] = atoi(argv[d + fieldOfViewArgumentIndex + trajectory->numDimensions]); } jdcfcu(densityCompensation.size(), coordinates.data(), trajectory->numDimensions, NULL, fieldOfView, spatialResolution, cType, numLobes, numCt, COMPARTMENT_64, numIter, densityCompensation.data()); for(int n=0; n<densityCompensation.size(); n++) { const int readout = n / trajectory->numReadoutPoints; const int point = n % trajectory->numReadoutPoints; setTrajectoryPoint(point, readout, trajectory, NULL, densityCompensation[n]); } const char* filePathOutput = argv[12]; printf("Writing new file %s\n", filePathOutput); saveKSPaceFile(filePathOutput, trajectory, endian); } #else #include "mex.h" /* 0: k-space trajectory [axis, readout, interleaf] 1: dcf mask 2: FOV (cm) 3: Voxel dimensions (mm) 4: Convolution type 0-Constant 1-Separable 5: # of lobes 6: # of compartments 7: # iterations 8: dcf estimate */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { float FOV[3]; float vd[3]; const mwSize *trajDims; int numDim; // number of dimensions const mwSize *dcfDims; float *k; float *dcf; int *dcfInclude; // indeces to include in processing int numCt[] = {-1,-1,-1}; int numLobes = 2; int numIter = 1; // number of iterations ConvType cType = ctSEP; int npts; // number of point in trajectory int n; int doLoadDcfMask = 1; int doLoadDcf = 0; double *dptr; char msg[128]; if(!mxIsSingle(prhs[0])) mexErrMsgTxt("k-space trajectory not given as single precission float"); trajDims = mxGetDimensions(prhs[0]); numDim = trajDims[0]; if(numDim>3) mexErrMsgTxt("More than 3 axes given in trajectory"); npts = trajDims[1]*trajDims[2]; k = (float*)mxGetData(prhs[0]); // dcf mask if(nrhs<2) mexErrMsgTxt("dcf mask not given as 4 byte int"); if(mxIsEmpty(prhs[1])) { // if dcf mask not specified include every point doLoadDcfMask = 0; dcfInclude = (int*)malloc(npts*sizeof(int)); for(n=0; n<npts; n++) dcfInclude[n] = 1; } else { if(!mxIsInt32(prhs[1])) mexErrMsgTxt("dcf mask not given as 4 byte int"); dcfInclude = (int*)mxGetData(prhs[1]); } // FOV if(nrhs<3) mexErrMsgTxt("FOV not given"); else if(mxGetNumberOfElements(prhs[2])<numDim) { sprintf(msg, "%d dimension(s) of FOV given, need %d", mxGetNumberOfElements(prhs[2]), numDim); mexErrMsgTxt(msg); } else { dptr = mxGetPr(prhs[2]); for(n=0; n<trajDims[0]; n++) FOV[n] = dptr[n]; } // Resolution if(nrhs<4) mexErrMsgTxt("resolution not given"); else if(mxGetNumberOfElements(prhs[3])<numDim) { sprintf(msg, "%d voxel dimension(s) given, need %d", mxGetNumberOfElements(prhs[3]), numDim); mexErrMsgTxt(msg); } else { dptr = mxGetPr(prhs[3]); for(n=0; n<trajDims[0]; n++) vd[n] = dptr[n]; } // Convolution type if(nrhs>4) cType = (ConvType)mxGetScalar(prhs[4]); // Number of lobes if(nrhs>5) numLobes = 
mxGetScalar(prhs[5]); if(nrhs>6) { if(mxGetNumberOfElements(prhs[6])<numDim) { sprintf(msg, "%d number compartment(s) given, need %d", mxGetNumberOfElements(prhs[6]), numDim); mexErrMsgTxt(msg); } dptr = mxGetPr(prhs[6]); for(n=0; n<trajDims[0]; n++) numCt[n] = dptr[n]; } if(nrhs>7) numIter = mxGetScalar(prhs[7]); if(DBG_DCF) { mexPrintf("%d points\n", npts); mexPrintf("FOV %f %f %f\n", FOV[0], FOV[1], FOV[2]); mexPrintf("# lobes %d\n", numLobes); mexPrintf("# compartments %d %d %d\n", numCt[0], numCt[1], numCt[2]); mexPrintf("# iterations %d\n", numIter); } if(nrhs>8) { // Update the dcf given if(!mxIsSingle(prhs[8])) mexErrMsgTxt("dcf estimate not given as float"); dcfDims = mxGetDimensions(prhs[8]); for(n=0; n<2; n++) if(dcfDims[n] != trajDims[n+1]) { sprintf(msg, "Dcf dimensions [%d, %d] do not equal trajectory dimensions [%d, %d]\n", dcfDims[0], dcfDims[1], trajDims[1], trajDims[2]); mexErrMsgTxt(msg); } plhs[0] = mxDuplicateArray(prhs[8]); dcf = (float*)mxGetData(plhs[0]); } else { // Start from flat dcf plhs[0] = mxCreateNumericArray(2, dcfDims, mxSINGLE_CLASS, mxREAL); dcf = (float*)mxGetData(plhs[0]); for(n=0; n<npts; n++) dcf[n] = 1.0f; } if(DBG_DCF) mexPrintf("Trajectory dimensions [%d %d]\n", dcfDims[0], dcfDims[1]); jdcfcu(npts, k, trajDims[0], dcfInclude, FOV, vd, cType, numLobes, numCt, 64, numIter, dcf); if(!doLoadDcfMask) free(dcfInclude); return; } #endif
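The convolution kernels above recover a 1-D connection index from a 2-D grid ("index = blockIdx.x + gridDim.x*blockIdx.y"), and findDataSize factors the connection count into such a grid because gridDim.y is capped at 65535 blocks (gridDim.x had the same cap before compute capability 3.0). A stand-alone sketch of that mapping, together with the in-kernel guard it requires, is shown below; it is not the file's code, only an illustration of the same idea.

#include <cuda_runtime.h>
#include <math.h>

// Factor `total` blocks into a gridDim.x x gridDim.y launch with each dimension <= 65535.
static dim3 make_2d_grid(unsigned int total)
{
    unsigned int x = total, y = 1;
    if (total > 65535u) {
        x = (unsigned int)ceil(sqrt((double)total));
        y = (total + x - 1u) / x;      // ceil(total / x)
    }
    return dim3(x, y, 1);
}

// Inside the kernel, the padding blocks introduced by the rounding must be skipped:
//   unsigned int index = blockIdx.x + gridDim.x * blockIdx.y;
//   if (index >= total) return;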
eae65b92b365ebe6a3828f4c4467b15d1c7f305c.cu
#include <stdio.h> #include <cublas.h> #include "mp1-util.h" #include <vector> extern "C"{ #include "trajectory.h" } #define MAXBINS 500000 #define BINSIZE128 64 #define BINSIZE256 256 #ifndef PI #define PI 3.141592653f #endif #define DBG_DCF 1 #define EPS 2.2204e-16 enum ConvType {ctCONST=0, ctSEP}; const char savePath[] = "/home_local/noaddy/research/data/code/dcfjcu/"; enum CompartmentSize { COMPARTMENT_64=64, COMPARTMENT_128=128 }; Timer timer; FILE* file; char filename[128]; /** \param[in] k Distance from k-space origin \param[in] F k-space scaling factor */ __device__ float P2D(float k, float F) { float val = 1.0f; if(k>1.0e-6) { val = F*j1f(F*PI*k)/(2.0f*k)/(PI*F*F/4.0f); val *= val; } return val; } /** \param[in] k Distance from k-space origin \param[in] F k-space scaling factor */ __device__ float P3D(float k, float F) { float val; // float s, c; float pfk; /*if(k<1e-6) val = 1.0f; else {*/ pfk = PI*F*k; // s = sinf(pfk); // c = cosf(pfk); // val = (s-pfk*c)/(2.0f*PI*PI*k*k*k) * (s-pfk*c)/(2.0f*PI*PI*k*k*k)/Pmax/Pmax; val = 3.0f*(sinf(pfk) - pfk*cosf(pfk))/(pfk*pfk*pfk); val *= val; //} return val; } __device__ float P3D1(float k, float F) { return 9.0f*(sinf(PI*F*k) - PI*F*k*cosf(PI*F*k))*(sinf(PI*F*k) - PI*F*k*cosf(PI*F*k))/(PI*F*k*PI*F*k*PI*F*k * PI*F*k*PI*F*k*PI*F*k); } __device__ float P1D(float k, float F) { float pfk = PI*F*k; float val; // Avoid NaN errors if(k>EPS) val = sinf(pfk)/pfk; else val = 0.0f; return val*val; } /** \param[in] connections \param[in] ndim Number of dimensions */ __global__ void conv_kernel(unsigned int *connections, int numConnections, int *bins, float *ks, int ndim, float kernelRad, float F, float* Win, float *Wout) { unsigned int index = blockIdx.x + gridDim.x*blockIdx.y; unsigned int tx = threadIdx.x; // unsigned int ty = threadIdx.y; unsigned int b1, b2; float3 k1, k2; float d, d2; float P; int n1, n2; int i2; if(index>=numConnections) return; b1 = connections[2*index]; b2 = connections[2*index+1]; n1 = bins[BINSIZE128*b1+tx]; if(n1<0) return; if(b1==b2) { k1.x = ks[ndim*n1]; k1.y = ks[ndim*n1+1]; if(ndim>2) k1.z = ks[ndim*n1+2]; else k1.z = 0.0f; for(i2=tx; i2<BINSIZE128; i2++) { n2 = bins[BINSIZE128*b2+i2]; if(n2>=0) { k2.x = ks[ndim*n2]; k2.y = ks[ndim*n2+1]; if(ndim>2) k2.z = ks[ndim*n2+2]; else k2.z = 0.0f; if(n1==n2) { // if(Wout[n1]>0) // if(n1==0) // printf("Wout %.0f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d\n", Wout[n1], b1, b2, tx, n1, n2, index); atomicAdd(&Wout[n1], Win[n1]); // atomicAdd(&(Wout[n1]), 1); // Wout[n1] = Win[n1]; } else { // if(n1==60732 || n2==60732) // printf("Wout %.0f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d, k1 (%f,%f,%f) k2 (%f,%f,%f)\n", Wout[n1], b1, b2, tx, n1, n2, index, k1.x, k1.y, k1.z, k2.x, k2.y, k2.z); d2 = (k2.x-k1.x)*(k2.x-k1.x) + (k2.y-k1.y)*(k2.y-k1.y) + (k2.z-k1.z)*(k2.z-k1.z); if(d2<=(kernelRad*kernelRad)) { d = sqrtf(d2); // P = P2D(d, F); P = P3D(d, F); // P = 0.0f; atomicAdd(&Wout[n1], P*Win[n2]); atomicAdd(&Wout[n2], P*Win[n1]); /*atomicAdd(&(Wout[n1]), 1); atomicAdd(&(Wout[n2]), 1);*/ } } } else break; } } else { k1.x = ks[ndim*n1]; k1.y = ks[ndim*n1+1]; if(ndim>2) k1.z = ks[ndim*n1+2]; else k1.z = 0.0f; for(i2=0; i2<BINSIZE128; i2++) { n2 = bins[BINSIZE128*b2+i2]; if(n2>=0) { k2.x = ks[ndim*n2]; k2.y = ks[ndim*n2+1]; if(ndim>2) k2.z = ks[ndim*n2+2]; else k2.z = 0.0f; d2 = (k2.x-k1.x)*(k2.x-k1.x) + (k2.y-k1.y)*(k2.y-k1.y) + (k2.z-k1.z)*(k2.z-k1.z); if(d2<=(kernelRad*kernelRad)) { d = sqrtf(d2); // P = P2D(d, F); P = P3D(d, F); // P = 0.0f; atomicAdd(&(Wout[n1]), P*Win[n2]); 
atomicAdd(&(Wout[n2]), P*Win[n1]); /*atomicAdd(&(Wout[n1]), 1); atomicAdd(&(Wout[n2]), 1);*/ // if(P>1.0f) // if(kernelRad-1e-4<d) // printf("Wout %.2f, P %f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d\n", Wout[n1], P, b1, b2, tx, n1, n2, index); } } else break; } } return; } __global__ void conv_kernel2(unsigned int *connections, int numConnections, int *bins, float *ks, int ndim, float kernelRad, float F, float* Win, float *Wout) { unsigned int index = blockIdx.x + gridDim.x*blockIdx.y; unsigned int tx = threadIdx.x; unsigned int b1, b2; float d, d2; float P; int i2; if(index>=numConnections) return; b1 = connections[2*index]; b2 = connections[2*index+1]; float3 k1; int n1; __shared__ float3 k2[BINSIZE128]; __shared__ int n2[BINSIZE128]; n1 = bins[BINSIZE128*b1+tx]; if(n1<0) return; k1.x = ks[ndim*n1]; k1.y = ks[ndim*n1+1]; if(ndim>2) k1.z = ks[ndim*n1+2]; else k1.z = 0.0f; n2[tx] = bins[BINSIZE128*b2+tx]; if(n2>=0) { k2[tx].x = ks[ndim*n2[tx]]; k2[tx].y = ks[ndim*n2[tx]+1]; if(ndim>2) k2[tx].z = ks[ndim*n2[tx]+2]; else k2[tx].z = 0.0f; } __syncthreads(); if(b1==b2) { for(i2=tx; i2<BINSIZE128; i2++) { if(n2[i2]>=0) { if(n1==n2[i2]) { // if(Wout[n1]>0) // if(n1==0) // printf("Wout %.0f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d\n", Wout[n1], b1, b2, tx, n1, n2, index); atomicAdd(&Wout[n1], Win[n1]); // atomicAdd(&(Wout[n1]), 1); // Wout[n1] = Win[n1]; } else { // if(n1==60732 || n2==60732) // printf("Wout %.0f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d, k1 (%f,%f,%f) k2 (%f,%f,%f)\n", Wout[n1], b1, b2, tx, n1, n2, index, k1.x, k1.y, k1.z, k2.x, k2.y, k2.z); d2 = (k2[i2].x-k1.x)*(k2[i2].x-k1.x) + (k2[i2].y-k1.y)*(k2[i2].y-k1.y) + (k2[i2].z-k1.z)*(k2[i2].z-k1.z); if(d2<=(kernelRad*kernelRad)) { d = sqrtf(d2); P = P2D(d, F); atomicAdd(&Wout[n1], P*Win[n2[i2]]); atomicAdd(&Wout[n2[i2]], P*Win[n1]); /*atomicAdd(&(Wout[n1]), 1); atomicAdd(&(Wout[n2]), 1);*/ } } } else break; } } else { for(i2=0; i2<BINSIZE128; i2++) { if(n2[i2]>=0) { d2 = (k2[i2].x-k1.x)*(k2[i2].x-k1.x) + (k2[i2].y-k1.y)*(k2[i2].y-k1.y) + (k2[i2].z-k1.z)*(k2[i2].z-k1.z); if(d2<=(kernelRad*kernelRad)) { d = sqrtf(d2); P = P2D(d, F); atomicAdd(&(Wout[n1]), P*Win[n2[i2]]); atomicAdd(&(Wout[n2[i2]]), P*Win[n1]); /*atomicAdd(&(Wout[n1]), 1); atomicAdd(&(Wout[n2]), 1);*/ // if(P>1.0f) // if(kernelRad-1e-4<d) // printf("Wout %.2f, P %f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d\n", Wout[n1], P, b1, b2, tx, n1, n2, index); } } else break; } } return; } /** Using template */ template <int NDIM, int BINSIZE> __global__ void conv_kernelt(unsigned int *connections, int numConnections, int *bins, float *ks, float kernelRad, float F, float* Win, float *Wout) { unsigned int index = blockIdx.x + gridDim.x*blockIdx.y; unsigned int tx = threadIdx.x; unsigned int b1, b2; float3 k1, k2; float d, d2; float P; int n1, n2; int i2; if(index>=numConnections) return; b1 = connections[2*index]; b2 = connections[2*index+1]; n1 = bins[BINSIZE*b1+tx]; if(n1<0) return; if(b1==b2) { // Processing matches in the same bin k1.x = ks[NDIM*n1]; k1.y = ks[NDIM*n1+1]; if(NDIM>2) k1.z = ks[NDIM*n1+2]; else k1.z = 0.0f; for(i2=tx; i2<BINSIZE; i2++) { n2 = bins[BINSIZE*b2+i2]; if(n2>=0) { k2.x = ks[NDIM*n2]; k2.y = ks[NDIM*n2+1]; if(NDIM>2) k2.z = ks[NDIM*n2+2]; else k2.z = 0.0f; if(n1==n2) { // if(Wout[n1]>0) // if(n1==0) // printf("Wout %.0f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d\n", Wout[n1], b1, b2, tx, n1, n2, index); atomicAdd(&Wout[n1], Win[n1]); // atomicAdd(&(Wout[n1]), 1); // Wout[n1] = Win[n1]; } else { // if(n1==60732 || n2==60732) // 
printf("Wout %.0f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d, k1 (%f,%f,%f) k2 (%f,%f,%f)\n", Wout[n1], b1, b2, tx, n1, n2, index, k1.x, k1.y, k1.z, k2.x, k2.y, k2.z); if(NDIM==3) d2 = (k2.x-k1.x)*(k2.x-k1.x) + (k2.y-k1.y)*(k2.y-k1.y) + (k2.z-k1.z)*(k2.z-k1.z); else d2 = (k2.x-k1.x)*(k2.x-k1.x) + (k2.y-k1.y)*(k2.y-k1.y); if(d2<=(kernelRad*kernelRad)) { d = sqrtf(d2); if(NDIM==3) P = P3D(d, F); else P = P2D(d, F); // P = 0.0f; atomicAdd(&Wout[n1], P*Win[n2]); atomicAdd(&Wout[n2], P*Win[n1]); /*atomicAdd(&(Wout[n1]), 1); atomicAdd(&(Wout[n2]), 1);*/ } } } else break; } } else { k1.x = ks[NDIM*n1]; k1.y = ks[NDIM*n1+1]; if(NDIM>2) k1.z = ks[NDIM*n1+2]; else k1.z = 0.0f; for(i2=0; i2<BINSIZE; i2++) { n2 = bins[BINSIZE*b2+i2]; if(n2>=0) { k2.x = ks[NDIM*n2]; k2.y = ks[NDIM*n2+1]; if(NDIM>2) k2.z = ks[NDIM*n2+2]; else k2.z = 0.0f; d2 = (k2.x-k1.x)*(k2.x-k1.x) + (k2.y-k1.y)*(k2.y-k1.y); if(NDIM==3) d2 += (k2.z-k1.z)*(k2.z-k1.z); if(d2<=(kernelRad*kernelRad)) { d = sqrtf(d2); if(NDIM==2) P = P2D(d, F); else P = P3D(d, F); // P = 0.0f; atomicAdd(&(Wout[n1]), P*Win[n2]); atomicAdd(&(Wout[n2]), P*Win[n1]); /*atomicAdd(&(Wout[n1]), 1); atomicAdd(&(Wout[n2]), 1);*/ // if(P>1.0f) // if(kernelRad-1e-4<d) // printf("Wout %.2f, P %f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d\n", Wout[n1], P, b1, b2, tx, n1, n2, index); } } else break; } } //atomicAdd(pct, 1.0f/numConnections); //printf("%.2f\t", *pct); return; } /** Separable kernel */ template <int NDIM, int BINSIZE> __global__ void conv_kernel_sep(unsigned int *connections, int numConnections, int *bins, float *ks, float* kernelRad, float *F, float* Win, float *Wout) { unsigned int index = blockIdx.x + gridDim.x*blockIdx.y; unsigned int tx = threadIdx.x; unsigned int b1, b2; float3 k1, k2; float3 d; float P; int n1, n2; int i2; if(index>=numConnections) return; b1 = connections[2*index]; b2 = connections[2*index+1]; n1 = bins[BINSIZE*b1+tx]; if(n1<0) return; if(b1==b2) { k1.x = ks[NDIM*n1]; k1.y = ks[NDIM*n1+1]; if(NDIM>2) k1.z = ks[NDIM*n1+2]; else k1.z = 0.0f; for(i2=tx; i2<BINSIZE; i2++) { n2 = bins[BINSIZE*b2+i2]; if(n2>=0) { k2.x = ks[NDIM*n2]; k2.y = ks[NDIM*n2+1]; if(NDIM>2) k2.z = ks[NDIM*n2+2]; else k2.z = 0.0f; if(n1==n2) { // if(Wout[n1]>0) // if(n1==0) // printf("Wout %.0f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d\n", Wout[n1], b1, b2, tx, n1, n2, index); atomicAdd(&Wout[n1], Win[n1]); // atomicAdd(&(Wout[n1]), 1); // Wout[n1] = Win[n1]; } else { // if(n1==60732 || n2==60732) // printf("Wout %.0f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d, k1 (%f,%f,%f) k2 (%f,%f,%f)\n", Wout[n1], b1, b2, tx, n1, n2, index, k1.x, k1.y, k1.z, k2.x, k2.y, k2.z); d.x = fabs(k2.x-k1.x); d.y = fabs(k2.y-k1.y); d.z = fabs(k2.z-k1.z); if(NDIM==2) { if((d.x<kernelRad[0]) && (d.y<kernelRad[1])) { P = P1D(d.x, F[0]) * P1D(d.y, F[1]); atomicAdd(&Wout[n1], P*Win[n2]); atomicAdd(&Wout[n2], P*Win[n1]); } } else { if((d.x<kernelRad[0]) && (d.y<kernelRad[1]) && (d.z<kernelRad[2])) { P = P1D(d.x, F[0]) * P1D(d.y, F[1]) * P1D(d.z, F[2]); atomicAdd(&Wout[n1], P*Win[n2]); atomicAdd(&Wout[n2], P*Win[n1]); } } /*atomicAdd(&(Wout[n1]), 1); atomicAdd(&(Wout[n2]), 1);*/ } } else break; } } else { k1.x = ks[NDIM*n1]; k1.y = ks[NDIM*n1+1]; if(NDIM>2) k1.z = ks[NDIM*n1+2]; else k1.z = 0.0f; for(i2=0; i2<BINSIZE; i2++) { n2 = bins[BINSIZE*b2+i2]; if(n2>=0) { k2.x = ks[NDIM*n2]; k2.y = ks[NDIM*n2+1]; if(NDIM>2) k2.z = ks[NDIM*n2+2]; else k2.z = 0.0f; d.x = fabs(k2.x-k1.x); d.y = fabs(k2.y-k1.y); d.z = fabs(k2.z-k1.z); if(NDIM==2) { if((d.x<kernelRad[0]) && (d.y<kernelRad[1])) { P 
= P1D(d.x, F[0]) * P1D(d.y, F[1]); atomicAdd(&Wout[n1], P*Win[n2]); atomicAdd(&Wout[n2], P*Win[n1]); } } else { if((d.x<kernelRad[0]) && (d.y<kernelRad[1]) && (d.z<kernelRad[2])) { P = P1D(d.x, F[0]) * P1D(d.y, F[1]) * P1D(d.z, F[2]); atomicAdd(&Wout[n1], P*Win[n2]); atomicAdd(&Wout[n2], P*Win[n1]); } } /*atomicAdd(&(Wout[n1]), 1); atomicAdd(&(Wout[n2]), 1);*/ // if(P>1.0f) // if(kernelRad-1e-4<d) // printf("Wout %.2f, P %f, b1 %d, b2 %d, tx %d, n1 %d, n2 %d, index %d\n", Wout[n1], P, b1, b2, tx, n1, n2, index); } else break; } } return; } __global__ void divideArray(float* num, float* den, int npts) { unsigned int index = threadIdx.x + blockDim.x*blockIdx.x; if(index>=npts) return; if(fabsf(den[index])>0.0f) num[index] /= den[index]; else num[index] = 0.0f; return; } int findDataSize(int target, int max1, int max2, unsigned int *dim1, unsigned int *dim2) { int success= 0; int rem; int minRem=0; int d1, d2; if(target<max1) { *dim1 = target; *dim2 = 1; } else { d1 = max1; while(d1>0) { d2 = ceil(target/(1.0*d1)); rem = d1*d2 - target; if(d2>max2) d1 = 0; else if(rem<=minRem) { *dim1 = d1; *dim2 = d2; success = 1; } d1--; } } return success; } /** \param[in] ndim Number of dimensions \param[in] FOV Field of View (cm) */ void jdcfcu(int npts, float *k, int ndim, int *nInclude, float *FOV, float *voxDim, ConvType convType, int numLobes, int *numCt, int compartmentSize, int numIter, float *dcf) { unsigned int *binIdx; unsigned int *binCount1; int *bins, *bins_dev; unsigned int *binStartIdx; unsigned int *binCurrentIdx; unsigned int *binReps; unsigned int *binConnections, *binConnections_dev; int numConnections; int numBins1 = 1; int numBins2 = 0; int bsi, bci; int bi, bii; int freeBinIdx; int c[3]; int nptsActual; float pctBinFull; float kmax[3]; float F[3]; float *F_dev; float kernelRad[3]; float *kernelRad_dev; float kernelRadMax; int maxCtOffset[3] = {1,1,1}; float *Win_dev; float *Wout_dev; float *k_dev; dim3 grid_size, block_size; dim3 grid_size_div, block_size_div; int dim; int m, n, p, q; int mm, nn, pp, qq; int nnStart, ppStart, qqStart; printf("\n%d iterations\n", numIter); printf("%d lobes\n", numLobes); printf("Compartment size %d\n", compartmentSize); //Setup kernels for(n=0; n<ndim; n++) { kmax[n] = 5/voxDim[n]; F[n] = FOV[n]*2*kmax[n]; /* parameter for kernel width */ } switch(convType) { case ctCONST: if(ndim==2) *kernelRad = (numLobes*1.01+0.21)/(*F); else *kernelRad = (numLobes*1.01+0.44f)/(*F); kernelRadMax = *kernelRad; break; case ctSEP: kernelRadMax = 0.0f; for(n=0; n<ndim; n++) { kernelRad[n] = numLobes/F[n]; // sinc has spacing 1 kernelRadMax = max(kernelRadMax, kernelRad[n]); } break; } // Calculate optimal bin size for(dim=0; dim<ndim; dim++) { if(numCt[dim]<0) { switch(convType) { case ctCONST: numCt[dim] = 1/kernelRad[0]-1; break; case ctSEP: numCt[dim] = 1/kernelRad[dim]-1; break; } numCt[dim] -= numCt[dim]%2; } } // Calculate total number of bins for(dim=0; dim<ndim; dim++) numBins1 *= numCt[dim]; binCount1 = (unsigned int*)malloc(numBins1*sizeof(unsigned int)); // Initialize number of points in grid to 0 // memste(binCount1, 0, numBins1*sizeof(unsigned int)); for(n=0; n<numBins1; n++) binCount1[n] = 0; for(dim=ndim; dim<3; dim++) numCt[dim] = 1; if(nInclude==NULL) { nInclude = (int*)malloc(npts*sizeof(int)); for(n=0; n<npts; n++) nInclude[n] = 1; } binIdx = (unsigned int*)malloc(npts*sizeof(unsigned int)); nptsActual = 0; for(n=0; n<npts; n++) { if(nInclude[n]) { // dcf[n] = 1.0f; for(dim=0; dim<ndim; dim++) c[dim] = floor(k[ndim*n+dim]*numCt[dim]+numCt[dim]/2.0); 
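// Worked example of the bin-index arithmetic above (illustrative numbers, not
// from the original file): with numCt[dim] = 20 compartments, a normalized
// coordinate k = -0.5 maps to floor(-0.5*20 + 10) = 0 and k = 0.49 maps to
// floor(9.8 + 10) = 19, so the trajectory coordinates are assumed to lie in
// roughly [-0.5, 0.5) before binning.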
for(dim=ndim; dim<3; dim++) c[dim] = 0; /* Make sure c is in bounds */ for(dim=0; dim<ndim; dim++) { c[dim] = min(c[dim], numCt[dim]-1); c[dim] = max(c[dim], 0); } binIdx[n] = (c[2]*numCt[1]+c[1])*numCt[0] + c[0]; if(binIdx[n]>numBins1) { fprintf(stderr, "Error numbins=%d, current bin idx=%d, n= %d\n", numBins1, binIdx[n], n); return; } binCount1[binIdx[n]]++; nptsActual++; } else dcf[n] = 0.0f; } printf("Calculating for %d/%d points\n", nptsActual, npts); //Count number of required bins binStartIdx = (unsigned int*)malloc(numBins1*sizeof(unsigned int)); binReps = (unsigned int*)malloc(numBins1*sizeof(unsigned int)); for(n=0; n<numBins1; n++) { if(n==0) binStartIdx[n] = 0; else binStartIdx[n] = binStartIdx[n-1]+binReps[n-1]; binReps[n] = ceil(binCount1[n]/(float)compartmentSize); numBins2 += binReps[n]; } bins = (int*)malloc(compartmentSize *numBins2*sizeof(int)); binCurrentIdx = (unsigned int*)malloc(numBins1*sizeof(unsigned int)); memset(binCurrentIdx, 0, numBins1*sizeof(unsigned int)); for(n=0; n< compartmentSize *numBins2; n++) bins[n] = -1; for(n=0; n<npts; n++) { if(nInclude[n]) { bi = binIdx[n]; bci = binCurrentIdx[bi]; bsi = compartmentSize *binStartIdx[bi]; bins[bsi+bci++] = n; binCurrentIdx[bi] = bci; } } pctBinFull = 0; for(n=0; n< compartmentSize *numBins2; n++) if(bins[n]!=-1) pctBinFull += 1.0f/(compartmentSize *numBins2); printf("%d bins\n", numBins2); printf("%f%% of binspace is full\n", 100*pctBinFull); // Setup connections if(convType==ctCONST) { printf("Kernel radius %f\n", *kernelRad); printf("F %f\n", *F); } else { printf("Kernel radii %f %f %f\n", kernelRad[0], kernelRad[1], kernelRad[2]); printf("F %f %f %f\n", F[0], F[1], F[2]); } // Calculate maximum compartment search distance switch(convType) { case ctCONST: for(dim=0; dim<ndim; dim++) maxCtOffset[dim] = (int)(*kernelRad*numCt[dim]+1); break; case ctSEP: for(n=0; n<ndim; n++) maxCtOffset[n] = (int)(kernelRad[n]*numCt[n]+1); break; } /* if(ndim==2) binConnections = (unsigned int*)malloc(2*100000*sizeof(unsigned int)); else if(ndim==3) binConnections = (unsigned int*)malloc(2*20000000*sizeof(unsigned int));*/ for(dim=0; dim<2; dim++) { // on first pass count number of connections and allocate // on second pass assign connections if(dim==1) binConnections = (unsigned int*)malloc(2*numConnections*sizeof(unsigned int)); numConnections = 0; for(m=0; m<numCt[0]; m++) for(n=0; n<numCt[1]; n++) for(p=0; p<numCt[2]; p++) { bi = (p*numCt[1]+n)*numCt[0]+m; for(mm=m; mm<min(m+maxCtOffset[0]+1, numCt[0]); mm++) { if(mm==m) nnStart = n; else nnStart = max(0,n-maxCtOffset[1]); for(nn=nnStart; nn<min(n+maxCtOffset[1]+1, numCt[1]); nn++) { if((nn==n) && (mm==m)) ppStart = p; else ppStart = max(0, p-maxCtOffset[2]); for(pp=ppStart; pp<min(p+maxCtOffset[2]+1, numCt[2]); pp++) { bii = (pp*numCt[1]+nn)*numCt[0]+mm; for(q=0; q<binReps[bi]; q++) { if(bi==bii) qqStart = q; else qqStart = 0; for(qq=qqStart; qq<binReps[bii]; qq++) { if(dim==0) numConnections++; else { binConnections[2*numConnections] = binStartIdx[bi]+q; binConnections[2*numConnections+++1] = binStartIdx[bii]+qq; } // printf("numConnections %d\n", numConnections); } } } } } } } printf("# compartments %d %d %d\n", numCt[0], numCt[1], numCt[2]); printf("Max compartment offset %d %d %d\n", maxCtOffset[0], maxCtOffset[1], maxCtOffset[2]); printf("%d connections\n", numConnections); block_size.x = compartmentSize; // block_size.y = BINSIZE; grid_size.x = numConnections; if(!findDataSize(numConnections, 65536, 65536, &grid_size.x, &grid_size.y)) { // Coldn't find exact factors 
grid_size.x = sqrt(numConnections); grid_size.y =ceil(numConnections/(double)grid_size.x); } printf("Grid size [%d,%d,%d]\n", grid_size.x, grid_size.y, grid_size.z); printf("Block size [%d,%d,%d]\n", block_size.x, block_size.y, block_size.z); findBlockGrid3(npts, 256, &block_size_div, &grid_size_div); block_size_div.x = 256; /* grid_size_div.x = npts/block_size_div.x; grid_size_div.x++;*/ printf("Divide grid size [%d,%d,%d]\n", grid_size_div.x, grid_size_div.y, grid_size_div.z); printf("Divide block size [%d,%d,%d]\n", block_size_div.x, block_size_div.y, block_size_div.z); // Allocate gpu memory timer.start(); cudaMalloc((void**)&binConnections_dev, 2*numConnections*sizeof(unsigned int)); cudaMalloc((void**)&bins_dev, compartmentSize*numBins2*sizeof(int)); cudaMalloc((void**)&k_dev, ndim*npts*sizeof(float)); cudaMalloc((void**)&Win_dev, npts*sizeof(float)); cudaMalloc((void**)&Wout_dev, npts*sizeof(float)); // Copy to gpu cudaMemcpy(binConnections_dev, binConnections, 2*numConnections*sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(bins_dev, bins, compartmentSize*numBins2*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(k_dev, k, ndim*npts*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(Win_dev, dcf, npts*sizeof(float), cudaMemcpyHostToDevice); timer.stop("Allocate and copy to gpu"); for(n=0; n<numIter; n++) { timer.start(); cudaMemset(Wout_dev, 0, npts*sizeof(float)); // conv_kernel<<<grid_size, block_size>>>(binConnections_dev, numConnections, bins_dev, k_dev, ndim, kernelRad, F, Win_dev, Wout_dev); switch(convType) { case ctCONST: if(ndim==2) conv_kernelt<2, 128><<<grid_size, block_size>>>(binConnections_dev, numConnections, bins_dev, k_dev, *kernelRad, *F, Win_dev, Wout_dev); else switch (compartmentSize) { case COMPARTMENT_128: conv_kernelt<3, COMPARTMENT_128><<<grid_size, block_size>>>(binConnections_dev, numConnections, bins_dev, k_dev, *kernelRad, *F, Win_dev, Wout_dev); break; case COMPARTMENT_64: conv_kernelt<3, COMPARTMENT_64><<<grid_size, block_size>>>(binConnections_dev, numConnections, bins_dev, k_dev, *kernelRad, *F, Win_dev, Wout_dev); break; } break; case ctSEP: cudaMalloc((void**)&kernelRad_dev, 3*sizeof(float)); cudaMalloc((void**)&F_dev, 3*sizeof(float)); cudaMemcpy(kernelRad_dev, kernelRad, 3*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(F_dev, F, 3*sizeof(float), cudaMemcpyHostToDevice); checkLaunch("copy"); if(ndim==2) switch(compartmentSize) { case COMPARTMENT_64: conv_kernel_sep<2, COMPARTMENT_64><<<grid_size, block_size>>>(binConnections_dev, numConnections, bins_dev, k_dev, kernelRad_dev, F_dev, Win_dev, Wout_dev); break; case COMPARTMENT_128: conv_kernel_sep<2, COMPARTMENT_128><<<grid_size, block_size>>>(binConnections_dev, numConnections, bins_dev, k_dev, kernelRad_dev, F_dev, Win_dev, Wout_dev); break; } else switch(compartmentSize) { case COMPARTMENT_64: conv_kernel_sep<3, COMPARTMENT_64><<<grid_size, block_size>>>(binConnections_dev, numConnections, bins_dev, k_dev, kernelRad_dev, F_dev, Win_dev, Wout_dev); break; case COMPARTMENT_128: conv_kernel_sep<3, COMPARTMENT_128><<<grid_size, block_size>>>(binConnections_dev, numConnections, bins_dev, k_dev, kernelRad_dev, F_dev, Win_dev, Wout_dev); break; case 256: conv_kernel_sep<3, 256><<<grid_size, block_size>>>(binConnections_dev, numConnections, bins_dev, k_dev, kernelRad_dev, F_dev, Win_dev, Wout_dev); break; case 512: conv_kernel_sep<3, 512><<<grid_size, block_size>>>(binConnections_dev, numConnections, bins_dev, k_dev, kernelRad_dev, F_dev, Win_dev, Wout_dev); break; default: 
fprintf(stderr, "Unsupported bin size %d {64,128,256,512}\n", compartmentSize); abort(); } break; } checkLaunch("convolution"); divideArray<<<grid_size_div, block_size_div>>>(Win_dev, Wout_dev, npts); checkLaunch("division"); timer.stop("convolution"); } cudaMemcpy(dcf, Wout_dev, npts*sizeof(float), cudaMemcpyDeviceToHost); sprintf(filename, "%sWout", savePath); file = fopen(filename, "wb"); if(file!=NULL) { fwrite(dcf, npts, sizeof(float), file); fclose(file); } cudaMemcpy(dcf, Win_dev, npts*sizeof(float), cudaMemcpyDeviceToHost); sprintf(filename, "%sbinConnections", savePath); file = fopen(filename, "wb"); if(file!=NULL) { fwrite(binConnections, 2*numConnections, sizeof(unsigned int), file); fclose(file); } sprintf(filename, "%sbins", savePath); file = fopen(filename, "wb"); if(file!=NULL) { fwrite(bins, compartmentSize*numBins2, sizeof(int), file); fclose(file); } cudaFree(binConnections_dev); cudaFree(bins_dev); cudaFree(Wout_dev); cudaFree(Win_dev); cudaFree(k_dev); free(binIdx); free(binConnections); return; } #ifndef MATLAB_MEX_FILE /*! * \brief Iteratively calculate the density compensation weights for a k-space trajectory. * * \param argc The number of command line arguments (including the executable). * \param argv Parameters * 1: File path to the k-space file with data stored as kx, ky, kz, dcf (3D) or kx, ky, dcf (2D). * 2: The number of readouts in the trajectory. * 3: The number of readout points per readout. * 4: The number of dimensions of the trajectory. * 5: X field of view (cm). * 6: Y field of view (cm). * 7: Z field of view (cm). * 8: X spatial resolution (mm). * 9: Y spatial resolution (mm). * 10: Z spatial resolution (mm). * 11: Endian mode. 'b' for big endian, 'l' for little. * 12: File path to save updated trajectory. */ int main(int argc, char *argv[]) { int numLobes = 2; int numCt[] = {-1,-1,-1}; int numIter = 1; ConvType cType = ctCONST; //printTrajectoryInfo(&traj, NULL); const char* filePathInput = argv[1]; printf("Loading k-space file from %s\n", filePathInput); Endian endian = argv[11][0] == 'b' ? 
BigEndian : LittleEndian; Trajectory* trajectory = loadKSpaceFile(filePathInput, atoi(argv[2]), atoi(argv[3]), atoi(argv[4]), endian); std::vector<float> coordinates; std::vector<float> densityCompensation; for(int r=0; r<trajectory->numReadouts; r++) for(int n=0; n< trajectory->numReadoutPoints; n++) { float pointCoordinates[3]; float density; trajectoryCoordinates(n, r, trajectory, pointCoordinates, &density); for (int d = 0; d < trajectory->numDimensions; d++) coordinates.push_back(pointCoordinates[d]); densityCompensation.push_back(density); } float fieldOfView[3]; float spatialResolution[3]; const int fieldOfViewArgumentIndex = 5; for (int d = 0; d < trajectory->numDimensions; d++) { fieldOfView[d] = atoi(argv[d + fieldOfViewArgumentIndex]); spatialResolution[d] = atoi(argv[d + fieldOfViewArgumentIndex + trajectory->numDimensions]); } jdcfcu(densityCompensation.size(), coordinates.data(), trajectory->numDimensions, NULL, fieldOfView, spatialResolution, cType, numLobes, numCt, COMPARTMENT_64, numIter, densityCompensation.data()); for(int n=0; n<densityCompensation.size(); n++) { const int readout = n / trajectory->numReadoutPoints; const int point = n % trajectory->numReadoutPoints; setTrajectoryPoint(point, readout, trajectory, NULL, densityCompensation[n]); } const char* filePathOutput = argv[12]; printf("Writing new file %s\n", filePathOutput); saveKSPaceFile(filePathOutput, trajectory, endian); } #else #include "mex.h" /* 0: k-space trajectory [axis, readout, interleaf] 1: dcf mask 2: FOV (cm) 3: Voxel dimensions (mm) 4: Convolution type 0-Constant 1-Separable 5: # of lobes 6: # of compartments 7: # iterations 8: dcf estimate */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { float FOV[3]; float vd[3]; const mwSize *trajDims; int numDim; // number of dimensions const mwSize *dcfDims; float *k; float *dcf; int *dcfInclude; // indeces to include in processing int numCt[] = {-1,-1,-1}; int numLobes = 2; int numIter = 1; // number of iterations ConvType cType = ctSEP; int npts; // number of point in trajectory int n; int doLoadDcfMask = 1; int doLoadDcf = 0; double *dptr; char msg[128]; if(!mxIsSingle(prhs[0])) mexErrMsgTxt("k-space trajectory not given as single precission float"); trajDims = mxGetDimensions(prhs[0]); numDim = trajDims[0]; if(numDim>3) mexErrMsgTxt("More than 3 axes given in trajectory"); npts = trajDims[1]*trajDims[2]; k = (float*)mxGetData(prhs[0]); // dcf mask if(nrhs<2) mexErrMsgTxt("dcf mask not given as 4 byte int"); if(mxIsEmpty(prhs[1])) { // if dcf mask not specified include every point doLoadDcfMask = 0; dcfInclude = (int*)malloc(npts*sizeof(int)); for(n=0; n<npts; n++) dcfInclude[n] = 1; } else { if(!mxIsInt32(prhs[1])) mexErrMsgTxt("dcf mask not given as 4 byte int"); dcfInclude = (int*)mxGetData(prhs[1]); } // FOV if(nrhs<3) mexErrMsgTxt("FOV not given"); else if(mxGetNumberOfElements(prhs[2])<numDim) { sprintf(msg, "%d dimension(s) of FOV given, need %d", mxGetNumberOfElements(prhs[2]), numDim); mexErrMsgTxt(msg); } else { dptr = mxGetPr(prhs[2]); for(n=0; n<trajDims[0]; n++) FOV[n] = dptr[n]; } // Resolution if(nrhs<4) mexErrMsgTxt("resolution not given"); else if(mxGetNumberOfElements(prhs[3])<numDim) { sprintf(msg, "%d voxel dimension(s) given, need %d", mxGetNumberOfElements(prhs[3]), numDim); mexErrMsgTxt(msg); } else { dptr = mxGetPr(prhs[3]); for(n=0; n<trajDims[0]; n++) vd[n] = dptr[n]; } // Convolution type if(nrhs>4) cType = (ConvType)mxGetScalar(prhs[4]); // Number of lobes if(nrhs>5) numLobes = 
mxGetScalar(prhs[5]); if(nrhs>6) { if(mxGetNumberOfElements(prhs[6])<numDim) { sprintf(msg, "%d number compartment(s) given, need %d", mxGetNumberOfElements(prhs[6]), numDim); mexErrMsgTxt(msg); } dptr = mxGetPr(prhs[6]); for(n=0; n<trajDims[0]; n++) numCt[n] = dptr[n]; } if(nrhs>7) numIter = mxGetScalar(prhs[7]); if(DBG_DCF) { mexPrintf("%d points\n", npts); mexPrintf("FOV %f %f %f\n", FOV[0], FOV[1], FOV[2]); mexPrintf("# lobes %d\n", numLobes); mexPrintf("# compartments %d %d %d\n", numCt[0], numCt[1], numCt[2]); mexPrintf("# iterations %d\n", numIter); } if(nrhs>8) { // Update the dcf given if(!mxIsSingle(prhs[8])) mexErrMsgTxt("dcf estimate not given as float"); dcfDims = mxGetDimensions(prhs[8]); for(n=0; n<2; n++) if(dcfDims[n] != trajDims[n+1]) { sprintf(msg, "Dcf dimensions [%d, %d] do not equal trajectory dimensions [%d, %d]\n", dcfDims[0], dcfDims[1], trajDims[1], trajDims[2]); mexErrMsgTxt(msg); } plhs[0] = mxDuplicateArray(prhs[8]); dcf = (float*)mxGetData(plhs[0]); } else { // Start from flat dcf plhs[0] = mxCreateNumericArray(2, dcfDims, mxSINGLE_CLASS, mxREAL); dcf = (float*)mxGetData(plhs[0]); for(n=0; n<npts; n++) dcf[n] = 1.0f; } if(DBG_DCF) mexPrintf("Trajectory dimensions [%d %d]\n", dcfDims[0], dcfDims[1]); jdcfcu(npts, k, trajDims[0], dcfInclude, FOV, vd, cType, numLobes, numCt, 64, numIter, dcf); if(!doLoadDcfMask) free(dcfInclude); return; } #endif
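/* Usage sketch (not part of the original file): a minimal host-side call to
   jdcfcu() for a small synthetic 2D radial trajectory. It assumes k-space
   coordinates normalized to roughly [-0.5, 0.5), FOV in cm and voxel size in
   mm as in main() above, and a flat initial weight of 1.0. The function,
   macro, and enum names come from this file; everything else (sizes, FOV,
   resolution, the helper name) is illustrative only. */
#include <vector>
#include <cmath>

static void exampleJdcfcuCall()
{
    const int readouts = 64, samples = 128;
    const int npts = readouts * samples;
    std::vector<float> k(2 * npts);      // interleaved kx, ky
    std::vector<float> dcf(npts, 1.0f);  // flat starting estimate

    for (int r = 0; r < readouts; r++)
        for (int s = 0; s < samples; s++) {
            float radius = 0.5f * s / samples;
            float angle = PI * r / readouts;
            k[2 * (r * samples + s)]     = radius * cosf(angle);
            k[2 * (r * samples + s) + 1] = radius * sinf(angle);
        }

    float fieldOfView[2] = {24.0f, 24.0f};   // cm
    float voxelSize[2]   = {1.5f, 1.5f};     // mm
    int numCompartments[3] = {-1, -1, -1};   // let jdcfcu choose the binning

    jdcfcu(npts, k.data(), 2, NULL, fieldOfView, voxelSize,
           ctSEP, 2 /*lobes*/, numCompartments, COMPARTMENT_64,
           1 /*iteration*/, dcf.data());     // dcf now holds the updated weights
}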
9cd765fc064c0bf7c4d701c8fae4f36ed4e42b35.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> using namespace std; hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); void viewDeviceProp(); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // Add vectors in parallel. //hipError_t cudaStatus; int num = 0; hipDeviceProp_t prop; cudaStatus = hipGetDeviceCount(&num); for (int i = 0; i < num; i++) { hipGetDeviceProperties(&prop, i); } cudaStatus = addWithCuda(c, a, b, arraySize); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } viewDeviceProp(); return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. 
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

Error:
    hipFree(dev_c);
    hipFree(dev_a);
    hipFree(dev_b);

    return cudaStatus;
}

void viewDeviceProp()
{
    int dev = 0;
    hipDeviceProp_t devProp;
    hipGetDeviceProperties(&devProp, dev);
    cout << "Using GPU device " << dev << ":" << devProp.name << endl;
    cout << "Num of SM: " << devProp.multiProcessorCount << endl;
    cout << "Shared memory size: " << devProp.sharedMemPerBlock / 1024.0 << "KB" << endl;
    cout << "Max threads of a block: " << devProp.maxThreadsPerBlock << endl;
    cout << "Max threads of an SM: " << devProp.maxThreadsPerMultiProcessor << endl;
    cout << "Max warps of an SM: " << devProp.maxThreadsPerMultiProcessor / 32 << endl;
}
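/* Illustrative sketch (not part of the original file): the repeated
   "check status, print, goto Error" pattern in addWithCuda() can be folded
   into a small helper. HIP_CHECK and checkedCopy are hypothetical names;
   only the HIP runtime calls themselves are taken from the file above. */
#include <cstdio>
#include <hip/hip_runtime.h>

#define HIP_CHECK(call)                                                        \
    do {                                                                       \
        hipError_t status_ = (call);                                           \
        if (status_ != hipSuccess) {                                           \
            fprintf(stderr, "%s failed: %s\n", #call,                          \
                    hipGetErrorString(status_));                               \
            return status_;                                                    \
        }                                                                      \
    } while (0)

/* Example: device allocation and host-to-device copy with the macro. */
static hipError_t checkedCopy(int** dev_a, const int* a, unsigned int size)
{
    HIP_CHECK(hipMalloc((void**)dev_a, size * sizeof(int)));
    HIP_CHECK(hipMemcpy(*dev_a, a, size * sizeof(int), hipMemcpyHostToDevice));
    return hipSuccess;
}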
9cd765fc064c0bf7c4d701c8fae4f36ed4e42b35.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> using namespace std; cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); void viewDeviceProp(); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // Add vectors in parallel. //cudaError_t cudaStatus; int num = 0; cudaDeviceProp prop; cudaStatus = cudaGetDeviceCount(&num); for (int i = 0; i < num; i++) { cudaGetDeviceProperties(&prop, i); } cudaStatus = addWithCuda(c, a, b, arraySize); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } viewDeviceProp(); return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. 
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);

    return cudaStatus;
}

void viewDeviceProp()
{
    int dev = 0;
    cudaDeviceProp devProp;
    cudaGetDeviceProperties(&devProp, dev);
    cout << "Using GPU device " << dev << ":" << devProp.name << endl;
    cout << "Num of SM: " << devProp.multiProcessorCount << endl;
    cout << "Shared memory size: " << devProp.sharedMemPerBlock / 1024.0 << "KB" << endl;
    cout << "Max threads of a block: " << devProp.maxThreadsPerBlock << endl;
    cout << "Max threads of an SM: " << devProp.maxThreadsPerMultiProcessor << endl;
    cout << "Max warps of an SM: " << devProp.maxThreadsPerMultiProcessor / 32 << endl;
}
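/* Illustrative sketch (not part of the original file): addKernel above is
   launched as <<<1, size>>>, which only covers arrays up to the per-block
   thread limit (1024 on current devices). A grid-stride version removes that
   limit; addKernelStrided is a hypothetical name. */
__global__ void addKernelStrided(int* c, const int* a, const int* b, int n)
{
    // Each thread handles indices i, i + blockDim.x*gridDim.x, ... up to n.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x)
        c[i] = a[i] + b[i];
}

/* Launch example: enough 256-thread blocks to cover n elements. */
// const int threads = 256;
// const int blocks = (n + threads - 1) / threads;
// addKernelStrided<<<blocks, threads>>>(dev_c, dev_a, dev_b, n);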
43f6a8ff4083fae24216e1d7299902f5bca3401e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <vector> #include "paddle/fluid/operators/math/pooling.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace math { template <typename PoolProcess, typename T> __global__ void KernelPool2D(const int nthreads, const T* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, T* output_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; T ele = pool_process.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pool_process.compute(input_data[h * input_width + w], &ele); } } int pool_size = exclusive ? (hend - hstart) * (wend - wstart) : ksize_height * ksize_width; pool_process.finalize(static_cast<T>(pool_size), &ele); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; int offsetH = (index / input_width) % input_height + padding_height; int offsetC = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; int phstart = (offsetH < ksize_height) ? 0 : (offsetH - ksize_height) / stride_height + 1; int pwstart = (offsetW < ksize_width) ? 
0 : (offsetW - ksize_width) / stride_width + 1; int phend = min(offsetH / stride_height + 1, output_height); int pwend = min(offsetW / stride_width + 1, output_width); T gradient = 0; T input = input_data[index]; int output_idx = (batch_idx * channels + offsetC) * output_height * output_width; output_data += output_idx; output_grad += output_idx; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = exclusive ? (hend - hstart) * (wend - wstart) : ksize_height * ksize_width; int output_sub_idx = ph * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], static_cast<T>(1.0 / pool_size), &gradient); } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; input_grad += (batch_idx * channels + c) * input_height * input_width; T ele = output_data[index]; int maxIndex = -1; bool stop = false; for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { if (ele == input_data[h * input_width + w]) { maxIndex = h * input_width + w; stop = true; } } } if (maxIndex != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]); } } } template <typename PoolProcess, typename T> void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()( const T* input, const std::vector<int>& input_shape, const std::vector<int>& output_shape, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_compute, bool exclusive, T* output, hipStream_t stream) { const int batch_size = input_shape[0]; const int input_channels = input_shape[1]; const int input_height = input_shape[2]; const int input_width = input_shape[3]; const int output_channels = output_shape[1]; const int output_height = output_shape[2]; const int output_width = output_shape[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); 
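  // Worked example of the launch configuration above (illustrative numbers,
  // not from the original file): for batch_size = 8, channels = 64 and a
  // 56x56 output, nthreads = 8*64*56*56 = 1,605,632 and
  // blocks = (1,605,632 + 1023) / 1024 = 1568, so the kernel runs a
  // (1568,1) grid of 1024-thread blocks and grid-strides over the indices.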
hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, stream, nthreads, input, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_compute, exclusive, output); } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename PoolProcess, typename T> class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, output_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename PoolProcess, typename T> class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2DGrad<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, input_grad_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename T> class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output.dims()[1]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } }; template class Pool2dDirectCUDAFunctor<paddle::operators::math::MaxPool<float>, float>; template class Pool2dDirectCUDAFunctor<paddle::operators::math::AvgPool<float>, float>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, double>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template <typename PoolProcess, typename T> __global__ void KernelPool3D( const int nthreads, const T* input_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, T* output_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % 
output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = pool_process.initial(); input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pool_process.compute( input_data[(d * input_height + h) * input_width + w], &ele); } } } int pool_size = exclusive ? (dend - dstart) * (hend - hstart) * (wend - wstart) : ksize_depth * ksize_height * ksize_width; pool_process.finalize(static_cast<T>(pool_size), &ele); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; int offsetH = (index / input_width) % input_height + padding_height; int offsetD = (index / input_width / input_height) % input_depth + padding_depth; int offsetC = (index / input_width / input_height / input_depth) % channels; int batch_idx = index / input_width / input_height / input_depth / channels; int pdstart = (offsetD < ksize_depth) ? 0 : (offsetD - ksize_depth) / stride_depth + 1; int phstart = (offsetH < ksize_height) ? 0 : (offsetH - ksize_height) / stride_height + 1; int pwstart = (offsetW < ksize_width) ? 0 : (offsetW - ksize_width) / stride_width + 1; int pdend = min((offsetD) / stride_depth + 1, output_depth); int phend = min((offsetH) / stride_height + 1, output_height); int pwend = min((offsetW) / stride_width + 1, output_width); T gradient = 0; T input = input_data[index]; int output_idx = (batch_idx * channels + offsetC) * output_depth * output_height * output_width; output_data += output_idx; output_grad += output_idx; for (int pd = pdstart; pd < pdend; ++pd) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = exclusive ? 
(dend - dstart) * (hend - hstart) * (wend - wstart) : ksize_depth * ksize_height * ksize_width; int output_sub_idx = (pd * output_height + ph) * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], static_cast<T>(1.0 / pool_size), &gradient); } } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = output_data[index]; bool stop = false; int maxIdx = -1; input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; input_grad += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend && !stop; ++d) { for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { if (ele == input_data[(d * input_height + h) * input_width + w]) { stop = true; maxIdx = (d * input_height + h) * input_width + w; } } } } if (maxIdx != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]); } } } /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename PoolProcess, class T> class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool3D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, output_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename PoolProcess, class T> class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool3DGrad<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, input_grad_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <class T> class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data); } }; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, double>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template <typename T1, typename T2> __global__ void KernelMaxPool2dWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / 
output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; T1 ele = -FLT_MAX; int max_index = -1; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_index = h * input_width + w; if (ele < input_data[input_index]) { max_index = input_index; ele = input_data[input_index]; } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool2DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int c_offset = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; int ph_start = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; int pw_start = (w_offset + padding_width < ksize_width) ? 0 : (w_offset + padding_width - ksize_width) / stride_width + 1; int ph_end = min((h_offset + padding_height) / stride_height + 1, output_height); int pw_end = min((w_offset + padding_width) / stride_width + 1, output_width); T1 gradient = 0; int input_current_featuremap_idx = h_offset * input_width + w_offset; int output_idx = (batch_idx * channels + c_offset) * output_height * output_width; mask_data += output_idx; output_grad += output_idx; for (int ph = ph_start; ph < ph_end; ++ph) { for (int pw = pw_start; pw < pw_end; ++pw) { if (mask_data[ph * output_width + pw] == input_current_featuremap_idx) gradient += output_grad[ph * output_width + pw]; } } input_grad[index] = gradient; } } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2dWithIdx<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, output_data, mask_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename T1, typename T2> class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_height = input_grad->dims()[2]; const int input_width = input_grad->dims()[3]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T2* mask_data = mask.data<T2>(); const T1* output_grad_data = output_grad.data<T1>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2DWithIdxGrad<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, output_grad_data, mask_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } }; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, 
double, int>; template <typename T1, typename T2> __global__ void KernelMaxPool3DWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T1 ele = -FLT_MAX; int max_index = -1; input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (ele < input_data[(d * input_height + h) * input_width + w]) { max_index = (d * input_height + h) * input_width + w; ele = input_data[max_index]; } } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool3DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int d_offset = (index / input_width / input_height) % input_depth; int c_offset = (index / input_width / input_height / input_depth) % channels; int batch_idx = index / input_width / input_height / input_depth / channels; int pd_start = (d_offset + padding_depth < ksize_depth) ? 0 : (d_offset + padding_depth - ksize_depth) / stride_depth + 1; int ph_start = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; int pw_start = (w_offset + padding_width < ksize_width) ? 
0 : (w_offset + padding_width - ksize_width) / stride_width + 1; int pd_end = min((d_offset + padding_depth) / stride_depth + 1, output_depth); int ph_end = min((h_offset + padding_height) / stride_height + 1, output_height); int pw_end = min((w_offset + padding_width) / stride_width + 1, output_width); T1 gradient = 0; int input_current_feature_map_idx = (d_offset * input_height + h_offset) * input_width + w_offset; int output_idx = (batch_idx * channels + c_offset) * output_depth * output_height * output_width; mask += output_idx; output_grad += output_idx; for (int pd = pd_start; pd < pd_end; ++pd) { for (int ph = ph_start; ph < ph_end; ++ph) { for (int pw = pw_start; pw < pw_end; ++pw) { if (mask[(pd * output_height + ph) * output_width + pw] == input_current_feature_map_idx) gradient += output_grad[(pd * output_height + ph) * output_width + pw]; } } } input_grad[index] = gradient; } } /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. */ template <typename T1, typename T2> class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DWithIdx<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, output_data, mask_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_depth = input_grad->dims()[2]; const int input_height = input_grad->dims()[3]; const int input_width = input_grad->dims()[4]; const int output_depth = output_grad.dims()[2]; const int output_height = output_grad.dims()[3]; const int output_width = output_grad.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* output_grad_data = output_grad.data<T1>(); const T2* mask_data = mask.data<T2>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DWithIdxGrad<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, output_grad_data, mask_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data); } }; template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; } // namespace math } // namespace operators } // namespace paddle
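The hipified pooling file above and the CUDA original that follows are the same PaddlePaddle implementation; the systematic differences are the runtime header, the cuda*-to-hip* API renames, and the kernel-launch syntax. A minimal sketch of that launch translation, assuming a trivial scale_kernel and sizes that are not part of either file:

#include <hip/hip_runtime.h>

__global__ void scale_kernel(int n, float alpha, float* data) {
  // Grid-stride loop, the same traversal pattern used by the pooling kernels above.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    data[i] *= alpha;
  }
}

int main() {
  const int n = 1 << 20;
  float* d_data = nullptr;
  hipMalloc(&d_data, n * sizeof(float));
  hipMemset(d_data, 0, n * sizeof(float));
  dim3 threads(1024, 1);
  dim3 grid((n + 1024 - 1) / 1024, 1);
  // CUDA original form:  scale_kernel<<<grid, threads, 0, 0>>>(n, 2.0f, d_data);
  // hipify rewrites it into the explicit launch call:
  hipLaunchKernelGGL(scale_kernel, grid, threads, 0, 0, n, 2.0f, d_data);
  hipDeviceSynchronize();
  hipFree(d_data);
  return 0;
}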
43f6a8ff4083fae24216e1d7299902f5bca3401e.cu
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <vector> #include "paddle/fluid/operators/math/pooling.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace math { template <typename PoolProcess, typename T> __global__ void KernelPool2D(const int nthreads, const T* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, T* output_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; T ele = pool_process.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pool_process.compute(input_data[h * input_width + w], &ele); } } int pool_size = exclusive ? (hend - hstart) * (wend - wstart) : ksize_height * ksize_width; pool_process.finalize(static_cast<T>(pool_size), &ele); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; int offsetH = (index / input_width) % input_height + padding_height; int offsetC = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; int phstart = (offsetH < ksize_height) ? 0 : (offsetH - ksize_height) / stride_height + 1; int pwstart = (offsetW < ksize_width) ? 
0 : (offsetW - ksize_width) / stride_width + 1; int phend = min(offsetH / stride_height + 1, output_height); int pwend = min(offsetW / stride_width + 1, output_width); T gradient = 0; T input = input_data[index]; int output_idx = (batch_idx * channels + offsetC) * output_height * output_width; output_data += output_idx; output_grad += output_idx; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = exclusive ? (hend - hstart) * (wend - wstart) : ksize_height * ksize_width; int output_sub_idx = ph * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], static_cast<T>(1.0 / pool_size), &gradient); } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; input_grad += (batch_idx * channels + c) * input_height * input_width; T ele = output_data[index]; int maxIndex = -1; bool stop = false; for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { if (ele == input_data[h * input_width + w]) { maxIndex = h * input_width + w; stop = true; } } } if (maxIndex != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]); } } } template <typename PoolProcess, typename T> void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()( const T* input, const std::vector<int>& input_shape, const std::vector<int>& output_shape, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_compute, bool exclusive, T* output, cudaStream_t stream) { const int batch_size = input_shape[0]; const int input_channels = input_shape[1]; const int input_height = input_shape[2]; const int input_width = input_shape[3]; const int output_channels = output_shape[1]; const int output_height = output_shape[2]; const int output_width = output_shape[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); 
KernelPool2D<PoolProcess, T><<<grid, threads, 0, stream>>>( nthreads, input, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_compute, exclusive, output); } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename PoolProcess, typename T> class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool2D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, output_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename PoolProcess, typename T> class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool2DGrad<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, input_grad_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename T> class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output.dims()[1]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2DGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } }; template class Pool2dDirectCUDAFunctor<paddle::operators::math::MaxPool<float>, float>; template class Pool2dDirectCUDAFunctor<paddle::operators::math::AvgPool<float>, float>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, double>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template <typename PoolProcess, typename T> __global__ void KernelPool3D( const int nthreads, const T* input_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, T* output_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = 
(index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = pool_process.initial(); input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pool_process.compute( input_data[(d * input_height + h) * input_width + w], &ele); } } } int pool_size = exclusive ? (dend - dstart) * (hend - hstart) * (wend - wstart) : ksize_depth * ksize_height * ksize_width; pool_process.finalize(static_cast<T>(pool_size), &ele); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; int offsetH = (index / input_width) % input_height + padding_height; int offsetD = (index / input_width / input_height) % input_depth + padding_depth; int offsetC = (index / input_width / input_height / input_depth) % channels; int batch_idx = index / input_width / input_height / input_depth / channels; int pdstart = (offsetD < ksize_depth) ? 0 : (offsetD - ksize_depth) / stride_depth + 1; int phstart = (offsetH < ksize_height) ? 0 : (offsetH - ksize_height) / stride_height + 1; int pwstart = (offsetW < ksize_width) ? 0 : (offsetW - ksize_width) / stride_width + 1; int pdend = min((offsetD) / stride_depth + 1, output_depth); int phend = min((offsetH) / stride_height + 1, output_height); int pwend = min((offsetW) / stride_width + 1, output_width); T gradient = 0; T input = input_data[index]; int output_idx = (batch_idx * channels + offsetC) * output_depth * output_height * output_width; output_data += output_idx; output_grad += output_idx; for (int pd = pdstart; pd < pdend; ++pd) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = exclusive ? 
(dend - dstart) * (hend - hstart) * (wend - wstart) : ksize_depth * ksize_height * ksize_width; int output_sub_idx = (pd * output_height + ph) * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], static_cast<T>(1.0 / pool_size), &gradient); } } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = output_data[index]; bool stop = false; int maxIdx = -1; input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; input_grad += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend && !stop; ++d) { for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { if (ele == input_data[(d * input_height + h) * input_width + w]) { stop = true; maxIdx = (d * input_height + h) * input_width + w; } } } } if (maxIdx != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]); } } } /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename PoolProcess, class T> class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool3D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, output_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename PoolProcess, class T> class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool3DGrad<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, input_grad_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <class T> class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool3DGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data); } }; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, double>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template <typename T1, typename T2> __global__ void KernelMaxPool2dWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % 
channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; T1 ele = -FLT_MAX; int max_index = -1; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_index = h * input_width + w; if (ele < input_data[input_index]) { max_index = input_index; ele = input_data[input_index]; } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool2DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int c_offset = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; int ph_start = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; int pw_start = (w_offset + padding_width < ksize_width) ? 0 : (w_offset + padding_width - ksize_width) / stride_width + 1; int ph_end = min((h_offset + padding_height) / stride_height + 1, output_height); int pw_end = min((w_offset + padding_width) / stride_width + 1, output_width); T1 gradient = 0; int input_current_featuremap_idx = h_offset * input_width + w_offset; int output_idx = (batch_idx * channels + c_offset) * output_height * output_width; mask_data += output_idx; output_grad += output_idx; for (int ph = ph_start; ph < ph_end; ++ph) { for (int pw = pw_start; pw < pw_end; ++pw) { if (mask_data[ph * output_width + pw] == input_current_featuremap_idx) gradient += output_grad[ph * output_width + pw]; } } input_grad[index] = gradient; } } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2dWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, output_data, mask_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename T1, typename T2> class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_height = input_grad->dims()[2]; const int input_width = input_grad->dims()[3]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T2* mask_data = mask.data<T2>(); const T1* output_grad_data = output_grad.data<T1>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, output_grad_data, mask_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } }; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; template <typename T1, typename T2> __global__ void 
KernelMaxPool3DWithIdx(
    const int nthreads, const T1* input_data, const int channels,
    const int input_depth, const int input_height, const int input_width,
    const int output_depth, const int output_height, const int output_width,
    const int ksize_depth, const int ksize_height, const int ksize_width,
    const int stride_depth, const int stride_height, const int stride_width,
    const int padding_depth, const int padding_height, const int padding_width,
    T1* output_data, T2* mask_data) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int pd = (index / output_width / output_height) % output_depth;
    int c = (index / output_width / output_height / output_depth) % channels;
    int batch_idx =
        index / output_width / output_height / output_depth / channels;

    int dstart = pd * stride_depth - padding_depth;
    int hstart = ph * stride_height - padding_height;
    int wstart = pw * stride_width - padding_width;
    int dend = min(dstart + ksize_depth, input_depth);
    int hend = min(hstart + ksize_height, input_height);
    int wend = min(wstart + ksize_width, input_width);
    dstart = max(dstart, 0);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);

    T1 ele = -FLT_MAX;
    int max_index = -1;
    input_data +=
        (batch_idx * channels + c) * input_depth * input_height * input_width;

    for (int d = dstart; d < dend; ++d) {
      for (int h = hstart; h < hend; ++h) {
        for (int w = wstart; w < wend; ++w) {
          if (ele < input_data[(d * input_height + h) * input_width + w]) {
            max_index = (d * input_height + h) * input_width + w;
            ele = input_data[max_index];
          }
        }
      }
    }
    output_data[index] = ele;
    mask_data[index] = max_index;
  }
}

template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdxGrad(
    const int nthreads, const T1* output_grad, const T2* mask,
    const int channels, const int input_depth, const int input_height,
    const int input_width, const int output_depth, const int output_height,
    const int output_width, const int ksize_depth, const int ksize_height,
    const int ksize_width, const int stride_depth, const int stride_height,
    const int stride_width, const int padding_depth, const int padding_height,
    const int padding_width, T1* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    int w_offset = index % input_width;
    int h_offset = (index / input_width) % input_height;
    int d_offset = (index / input_width / input_height) % input_depth;
    int c_offset =
        (index / input_width / input_height / input_depth) % channels;
    int batch_idx = index / input_width / input_height / input_depth / channels;

    int pd_start =
        (d_offset + padding_depth < ksize_depth)
            ? 0
            : (d_offset + padding_depth - ksize_depth) / stride_depth + 1;
    int ph_start =
        (h_offset + padding_height < ksize_height)
            ? 0
            : (h_offset + padding_height - ksize_height) / stride_height + 1;
    int pw_start =
        (w_offset + padding_width < ksize_width)
            ? 0
            : (w_offset + padding_width - ksize_width) / stride_width + 1;
    int pd_end =
        min((d_offset + padding_depth) / stride_depth + 1, output_depth);
    int ph_end =
        min((h_offset + padding_height) / stride_height + 1, output_height);
    int pw_end =
        min((w_offset + padding_width) / stride_width + 1, output_width);

    T1 gradient = 0;
    int input_current_feature_map_idx =
        (d_offset * input_height + h_offset) * input_width + w_offset;
    int output_idx = (batch_idx * channels + c_offset) * output_depth *
                     output_height * output_width;
    mask += output_idx;
    output_grad += output_idx;

    for (int pd = pd_start; pd < pd_end; ++pd) {
      for (int ph = ph_start; ph < ph_end; ++ph) {
        for (int pw = pw_start; pw < pw_end; ++pw) {
          if (mask[(pd * output_height + ph) * output_width + pw] ==
              input_current_feature_map_idx)
            gradient +=
                output_grad[(pd * output_height + ph) * output_width + pw];
        }
      }
    }
    input_grad[index] = gradient;
  }
}

/*
 * All tensors are in NCDHW format.
 * Ksize, strides, paddings are three elements. These three elements represent
 * depth, height and width, respectively.
 */
template <typename T1, typename T2>
class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input, const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, framework::Tensor* output,
                  framework::Tensor* mask) {
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_depth = input.dims()[2];
    const int input_height = input.dims()[3];
    const int input_width = input.dims()[4];
    const int output_channels = output->dims()[1];
    const int output_depth = output->dims()[2];
    const int output_height = output->dims()[3];
    const int output_width = output->dims()[4];
    const int ksize_depth = ksize[0];
    const int ksize_height = ksize[1];
    const int ksize_width = ksize[2];
    const int stride_depth = strides[0];
    const int stride_height = strides[1];
    const int stride_width = strides[2];
    const int padding_depth = paddings[0];
    const int padding_height = paddings[1];
    const int padding_width = paddings[2];

    const T1* input_data = input.data<T1>();
    T1* output_data = output->mutable_data<T1>(context.GetPlace());
    T2* mask_data = mask->mutable_data<T2>(context.GetPlace());

    int nthreads = batch_size * output_channels * output_depth *
                   output_height * output_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);

    KernelMaxPool3DWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>(
        nthreads, input_data, input_channels, input_depth, input_height,
        input_width, output_depth, output_height, output_width, ksize_depth,
        ksize_height, ksize_width, stride_depth, stride_height, stride_width,
        padding_depth, padding_height, padding_width, output_data, mask_data);
  }
};

/*
 * All tensors are in NCDHW format.
 * Ksize, strides, paddings are three elements. These three elements represent
 * depth, height and width, respectively.
 */
template <typename T1, typename T2>
class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& output_grad,
                  const framework::Tensor& mask, const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings,
                  framework::Tensor* input_grad) {
    const int batch_size = input_grad->dims()[0];
    const int input_channels = input_grad->dims()[1];
    const int input_depth = input_grad->dims()[2];
    const int input_height = input_grad->dims()[3];
    const int input_width = input_grad->dims()[4];
    const int output_depth = output_grad.dims()[2];
    const int output_height = output_grad.dims()[3];
    const int output_width = output_grad.dims()[4];
    const int ksize_depth = ksize[0];
    const int ksize_height = ksize[1];
    const int ksize_width = ksize[2];
    const int stride_depth = strides[0];
    const int stride_height = strides[1];
    const int stride_width = strides[2];
    const int padding_depth = paddings[0];
    const int padding_height = paddings[1];
    const int padding_width = paddings[2];

    const T1* output_grad_data = output_grad.data<T1>();
    const T2* mask_data = mask.data<T2>();
    T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace());

    int nthreads =
        batch_size * input_channels * input_depth * input_height * input_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);

    KernelMaxPool3DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>(
        nthreads, output_grad_data, mask_data, input_channels, input_depth,
        input_height, input_width, output_depth, output_height, output_width,
        ksize_depth, ksize_height, ksize_width, stride_depth, stride_height,
        stride_width, padding_depth, padding_height, padding_width,
        input_grad_data);
  }
};

template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, float,
                                         int>;
template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, float,
                                             int>;
template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, double,
                                         int>;
template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext,
                                             double, int>;

}  // namespace math
}  // namespace operators
}  // namespace paddle
2e06b7f4a89b62c9bcb768d3b83400b6dca68a7c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gsbn/procedures/ProcUpdLazy/Proj.hpp" #ifndef CPU_ONLY //#include <thrust/copy.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/functional.h> using namespace thrust::placeholders; namespace gsbn{ namespace proc_upd_lazy{ __global__ void update_all_kernel_gpu( int dim_conn, int dim_mcu, float *ptr_pi, float *ptr_ei, float *ptr_zi, int *ptr_ti, const float *ptr_pj, float *ptr_pij, float *ptr_eij, float *ptr_zi2, float *ptr_zj2, int *ptr_tij, float *ptr_wij, int simstep, float kp, float ke, float kzi, float kzj, float wgain, float eps, float eps2 ){ int i=blockIdx.y*gridDim.x+blockIdx.x; int j=threadIdx.x; __shared__ float sh_pi; if(j==0){ float pi = ptr_pi[i]; float zi = ptr_zi[i]; int ti = ptr_ti[i]; int pdt = simstep - ti; if(pdt>0){ float ei = ptr_ei[i]; pi = (pi - ((ei*kp*kzi - ei*ke*kp + ke*kp*zi)/(ke - kp) + (ke*kp*zi)/(kp - kzi))/(ke - kzi))/exp(kp*pdt) + ((exp(kp*pdt - ke*pdt)*(ei*kp*kzi - ei*ke*kp + ke*kp*zi))/(ke - kp) + (ke*kp*zi*exp(kp*pdt - kzi*pdt))/(kp - kzi))/(exp(kp*pdt)*(ke - kzi)); ei = (ei - (ke*zi)/(ke - kzi))/exp(ke*pdt) + (ke*zi*exp(ke*pdt - kzi*pdt))/(exp(ke*pdt)*(ke - kzi)); zi = zi*exp(-kzi*pdt); ti = simstep; ptr_pi[i] = pi; ptr_ei[i] = ei; ptr_zi[i] = zi; ptr_ti[i] = ti; } sh_pi = pi; } __syncthreads(); int index = i*dim_mcu+j; int tij = ptr_tij[index]; float zi2 = ptr_zi2[index]; int pdt = simstep - tij; if(pdt<=0){ ptr_tij[index]=simstep; }else{ float pij = ptr_pij[index]; float eij = ptr_eij[index]; float zj2 = ptr_zj2[index]; pij = (pij + ((eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2)/(ke - kp) - (ke*kp*zi2*zj2)/(kzi - kp + kzj))/(kzi - ke + kzj))/exp(kp*pdt) - ((exp(kp*pdt - ke*pdt)*(eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2))/(ke - kp) - (ke*kp*zi2*zj2*exp(kp*pdt - kzi*pdt - kzj*pdt))/ (kzi - kp + kzj))/(exp(kp*pdt)*(kzi - ke + kzj)); eij = (eij + (ke*zi2*zj2)/(kzi - ke + kzj))/exp(ke*pdt) - (ke*zi2*zj2)/(exp(kzi*pdt)*exp(kzj*pdt)*(kzi - ke + kzj)); zi2 = zi2*exp(-kzi*pdt); zj2 = zj2*exp(-kzj*pdt); tij = simstep; ptr_pij[index] = pij; ptr_eij[index] = eij; ptr_zi2[index] = zi2; ptr_zj2[index] = zj2; ptr_tij[index] = tij; // update wij and epsc float wij; if(kp){ float pi = sh_pi; float pj = ptr_pj[i/dim_conn*dim_mcu + j]; /* * Wij calculation: Original */ wij = wgain * log((pij + eps2)/((pi + eps)*(pj + eps))); /* * Wij calculation: Modified */ /* if(pi<eps || pj<eps){ wij=0; }else{ wij = wgain * log(pij/(pi*pj)); } */ ptr_wij[index] = wij; } } } __global__ void update_jxx_kernel_gpu( int n, const int8_t *ptr_sj, float *ptr_pj, float *ptr_ej, float *ptr_zj, float *ptr_bj, float *ptr_epsc, float kp, float ke, float kzj, float kzi, float kepsc, float kftj, float bgain, float eps ){ CUDA_KERNEL_LOOP(idx, n){ float pj = ptr_pj[idx]; float ej = ptr_ej[idx]; float zj = ptr_zj[idx]; int8_t sj = ptr_sj[idx]; ptr_epsc[idx] *= (1-kepsc); pj += (ej - pj)*kp; ej += (zj - ej)*ke; zj *= (1-kzj); if(sj>0){ zj += kftj; } if(kp){ float bj = bgain * log(pj + eps); ptr_bj[idx]=bj; } ptr_pj[idx] = pj; ptr_ej[idx] = ej; ptr_zj[idx] = zj; } } __global__ void update_row_kernel_gpu( int dim_conn, int dim_mcu, const int *ptr_ssi, float *ptr_pi, float *ptr_ei, float *ptr_zi, int *ptr_ti, const float *ptr_pj, float *ptr_pij, float *ptr_eij, float *ptr_zi2, float *ptr_zj2, int *ptr_tij, float* ptr_wij, float* ptr_epsc, int simstep, float kp, float ke, float kzi, float kzj, float kfti, float wgain, float eps, float eps2 ){ int i = blockIdx.x; int j = 
threadIdx.x; int row = ptr_ssi[i]; int col = j; int index = row*dim_mcu+col; __shared__ float sh_pi; if(j==0){ float pi = ptr_pi[row]; float zi = ptr_zi[row]; int ti = ptr_ti[row]; int pdt = simstep - ti; if(pdt<=0){ ptr_zi[row] += kfti; ptr_ti[row] = simstep; }else{ float ei = ptr_ei[row]; pi = (pi - ((ei*kp*kzi - ei*ke*kp + ke*kp*zi)/(ke - kp) + (ke*kp*zi)/(kp - kzi))/(ke - kzi))/exp(kp*pdt) + ((exp(kp*pdt - ke*pdt)*(ei*kp*kzi - ei*ke*kp + ke*kp*zi))/(ke - kp) + (ke*kp*zi*exp(kp*pdt - kzi*pdt))/(kp - kzi))/(exp(kp*pdt)*(ke - kzi)); ei = (ei - (ke*zi)/(ke - kzi))/exp(ke*pdt) + (ke*zi*exp(ke*pdt - kzi*pdt))/(exp(ke*pdt)*(ke - kzi)); zi = zi*exp(-kzi*pdt) + kfti; ti = simstep; ptr_pi[row] = pi; ptr_ei[row] = ei; ptr_zi[row] = zi; ptr_ti[row] = ti; } sh_pi = pi; } __syncthreads(); float pij = ptr_pij[index]; int tij = ptr_tij[index]; float zi2 = ptr_zi2[index]; int pdt = simstep - tij; if(pdt<=0){ ptr_zi2[index] += kfti; ptr_tij[index] = simstep; }else{ float eij = ptr_eij[index]; float zj2 = ptr_zj2[index]; pij = (pij + ((eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2)/(ke - kp) - (ke*kp*zi2*zj2)/(kzi - kp + kzj))/(kzi - ke + kzj))/exp(kp*pdt) - ((exp(kp*pdt - ke*pdt)*(eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2))/(ke - kp) - (ke*kp*zi2*zj2*exp(kp*pdt - kzi*pdt - kzj*pdt))/ (kzi - kp + kzj))/(exp(kp*pdt)*(kzi - ke + kzj)); eij = (eij + (ke*zi2*zj2)/(kzi - ke + kzj))/exp(ke*pdt) - (ke*zi2*zj2)/(exp(kzi*pdt)*exp(kzj*pdt)*(kzi - ke + kzj)); zi2 = zi2*exp(-kzi*pdt)+kfti; zj2 = zj2*exp(-kzj*pdt); tij = simstep; ptr_pij[index] = pij; ptr_eij[index] = eij; ptr_zi2[index] = zi2; ptr_zj2[index] = zj2; ptr_tij[index] = tij; float wij; int idx_hcu = row / dim_conn; int idx_mcu = idx_hcu * dim_mcu + j; if(kp){ float pi = sh_pi; float pj = ptr_pj[idx_mcu]; /* * Wij calculation: Original */ wij = wgain * log((pij + eps2)/((pi + eps)*(pj + eps))); /* * Wij calculation: Modified */ /* if(pi<eps || pj<eps){ wij=0; }else{ wij = wgain * log(pij/(pi*pj)); } */ ptr_wij[index] = wij; }else{ wij = ptr_wij[index]; } atomicAdd(&ptr_epsc[idx_mcu], wij); } } __global__ void update_col_kernel_gpu( int n, int active_col_num, int dim_conn, int dim_mcu, const int *ptr_ii, const int *ptr_ssj, float *ptr_pij, float *ptr_eij, float *ptr_zi2, float *ptr_zj2, int *ptr_tij, int simstep, float kp, float ke, float kzi, float kzj, float kftj ){ CUDA_KERNEL_LOOP(idx, n){ int j = idx/dim_conn; int i = idx%dim_conn; int row = ptr_ssj[j]/dim_mcu*dim_conn+i; if(ptr_ii[row]<0){ return; } int col = ptr_ssj[j]%dim_mcu; int index = row*dim_mcu+col; int tij = ptr_tij[index]; float zj2 = ptr_zj2[index]; int pdt = simstep - tij; if(pdt<=0){ zj2 += kftj; ptr_zj2[index]=zj2; ptr_tij[index]=simstep; }else{ float pij = ptr_pij[index]; float eij = ptr_eij[index]; float zi2 = ptr_zi2[index]; pij = (pij + ((eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2)/(ke - kp) - (ke*kp*zi2*zj2)/(kzi - kp + kzj))/(kzi - ke + kzj))/exp(kp*pdt) - ((exp(kp*pdt - ke*pdt)*(eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2))/(ke - kp) - (ke*kp*zi2*zj2*exp(kp*pdt - kzi*pdt - kzj*pdt))/ (kzi - kp + kzj))/(exp(kp*pdt)*(kzi - ke + kzj)); eij = (eij + (ke*zi2*zj2)/(kzi - ke + kzj))/exp(ke*pdt) - (ke*zi2*zj2)/(exp(kzi*pdt)*exp(kzj*pdt)*(kzi - ke + kzj)); zi2 = zi2*exp(-kzi*pdt); zj2 = zj2*exp(-kzj*pdt)+kftj; tij = simstep; ptr_pij[index] = pij; ptr_eij[index] = eij; ptr_zi2[index] = zi2; ptr_zj2[index] = zj2; ptr_tij[index] = tij; } } } void Proj::update_all_gpu(){ int simstep; float prn; float old_prn; CHECK(_glv.geti("simstep", simstep)); 
CHECK(_glv.getf("prn", prn)); CHECK(_glv.getf("old-prn", old_prn)); if(old_prn!=prn){ float *ptr_pi = _pi->mutable_gpu_data(); float *ptr_ei = _ei->mutable_gpu_data(); float *ptr_zi = _zi->mutable_gpu_data(); int *ptr_ti = _ti->mutable_gpu_data(); const float *ptr_pj = _pj->gpu_data(); float *ptr_pij = _pij->mutable_gpu_data(); float *ptr_eij = _eij->mutable_gpu_data(); float *ptr_zi2 = _zi2->mutable_gpu_data(); float *ptr_zj2 = _zj2->mutable_gpu_data(); int *ptr_tij = _tij->mutable_gpu_data(); float *ptr_wij = _wij->mutable_gpu_data(); const dim3 GRID_SIZE(_dim_conn, _dim_hcu); hipLaunchKernelGGL(( update_all_kernel_gpu), dim3(GRID_SIZE), dim3(_dim_mcu), 0, _stream, _dim_conn, _dim_mcu, ptr_pi, ptr_ei, ptr_zi, ptr_ti, ptr_pj, ptr_pij, ptr_eij, ptr_zi2, ptr_zj2, ptr_tij, ptr_wij, simstep-1, _taupdt*old_prn, _tauedt, _tauzidt, _tauzjdt, _wgain, _eps, _eps2 ); CUDA_POST_KERNEL_CHECK; } } void Proj::update_jxx_gpu(){ float prn; CHECK(_glv.getf("prn", prn)); float *ptr_pj = _pj->mutable_gpu_data(); float *ptr_ej = _ej->mutable_gpu_data(); float *ptr_zj = _zj->mutable_gpu_data(); float *ptr_bj = _bj->mutable_gpu_data()+_proj_in_pop*_dim_hcu*_dim_mcu; float *ptr_epsc = _epsc->mutable_gpu_data()+_proj_in_pop*_dim_hcu*_dim_mcu; const int8_t *ptr_sj = _sj->gpu_data(); CUDA_CHECK(hipSetDevice(_device)); hipLaunchKernelGGL(( update_jxx_kernel_gpu), dim3(GSBN_GET_BLOCKS(_dim_hcu*_dim_mcu)), dim3(GSBN_GET_THREADS(_dim_hcu*_dim_mcu)), 0, _stream, _dim_hcu*_dim_mcu, ptr_sj, ptr_pj, ptr_ej, ptr_zj, ptr_bj, ptr_epsc, _taupdt*prn, _tauedt, _tauzjdt, _tauzidt, _tauepscdt, _kftj, _bgain, _eps ); CUDA_POST_KERNEL_CHECK; } __global__ void update_ss_kernel_gpu ( int n, const int8_t *s, int *ss, int *global_size ){ extern __shared__ int shmem0[]; int i=threadIdx.x; __shared__ int size; __shared__ int global_index; CUDA_KERNEL_LOOP(idx, n){ if(i==0){ size = 0; } __syncthreads(); if(s[idx]>0){ int index = atomicInc((unsigned int*)(&size), 1024); shmem0[index] = idx; } __syncthreads(); if(i==0){ global_index = atomicAdd(global_size, size); } __syncthreads(); if(i<size){ ss[global_index+i] = shmem0[i]; } __syncthreads(); } } __global__ void update_que_kernel_gpu( int n, const int *ptr_ii, const int *ptr_di, const int8_t *ptr_si, int *ptr_qi, int8_t *ptr_siq ){ CUDA_KERNEL_LOOP(i, n){ int ii=ptr_ii[i]; if(ii>=0){ int32_t qi = ptr_qi[i]; ptr_siq[i] = int8_t(qi & 0x01); int8_t spk = ptr_si[ii]; if(spk>0){ qi |= (0x01 << ptr_di[i]); } qi >>= 1; ptr_qi[i]=qi; } } } void Proj::update_row_gpu(){ int active_row_num = _ssi->gpu_vector()->size(); if(active_row_num<=0){ return; } int simstep; float prn; CHECK(_glv.geti("simstep", simstep)); CHECK(_glv.getf("prn", prn)); float *ptr_pi = _pi->mutable_gpu_data(); float *ptr_ei = _ei->mutable_gpu_data(); float *ptr_zi = _zi->mutable_gpu_data(); int *ptr_ti = _ti->mutable_gpu_data(); const float *ptr_pj = _pj->gpu_data(); float *ptr_pij = _pij->mutable_gpu_data(); float *ptr_eij = _eij->mutable_gpu_data(); float *ptr_zi2 = _zi2->mutable_gpu_data(); float *ptr_zj2 = _zj2->mutable_gpu_data(); int *ptr_tij = _tij->mutable_gpu_data(); float *ptr_wij = _wij->mutable_gpu_data(); float *ptr_epsc = _epsc->mutable_gpu_data()+ _proj_in_pop * _dim_hcu * _dim_mcu; const int *ptr_ssi = _ssi->gpu_data(); hipLaunchKernelGGL(( update_row_kernel_gpu), dim3(active_row_num), dim3(_dim_mcu), 0, _stream, _dim_conn, _dim_mcu, ptr_ssi, ptr_pi, ptr_ei, ptr_zi, ptr_ti, ptr_pj, ptr_pij, ptr_eij, ptr_zi2, ptr_zj2, ptr_tij, ptr_wij, ptr_epsc, simstep, _taupdt*prn, _tauedt, _tauzidt, _tauzjdt, _kfti, _wgain, 
_eps, _eps2 ); CUDA_POST_KERNEL_CHECK; } void Proj::update_col_gpu(){ int active_col_num = _ssj->gpu_vector()->size(); if(active_col_num<=0){ return; } int simstep; float prn; CHECK(_glv.geti("simstep", simstep)); CHECK(_glv.getf("prn", prn)); float *ptr_pij = _pij->mutable_gpu_data(); float *ptr_eij = _eij->mutable_gpu_data(); float *ptr_zi2 = _zi2->mutable_gpu_data(); float *ptr_zj2 = _zj2->mutable_gpu_data(); int *ptr_tij = _tij->mutable_gpu_data(); const int *ptr_ii = _ii->gpu_data(); const int *ptr_ssj = _ssj->gpu_data(); hipLaunchKernelGGL(( update_col_kernel_gpu), dim3(GSBN_GET_BLOCKS(_dim_conn*active_col_num)), dim3(GSBN_GET_THREADS(_dim_conn*active_col_num)), 0, _stream, _dim_conn*active_col_num, active_col_num, _dim_conn, _dim_mcu, ptr_ii, ptr_ssj, ptr_pij, ptr_eij, ptr_zi2, ptr_zj2, ptr_tij, simstep, _taupdt * prn, _tauedt, _tauzidt, _tauzjdt, _kftj ); CUDA_POST_KERNEL_CHECK; } /* void Proj::update_ssi_gpu(){ CONST_DEVICE_VECTOR(int, *v_siq) = _siq->gpu_vector(); _ssi->resize(v_siq->size()); DEVICE_VECTOR(int, *v_ssi) = _ssi->mutable_gpu_vector(); auto it = copy_if( #ifndef CUDA_VERSION_LEGACY thrust::hip::par.on(_stream), #endif thrust::make_counting_iterator<int>(0), thrust::make_counting_iterator<int>(v_siq->size()), v_siq->begin(), v_ssi->begin(), _1>0); _ssi->resize(thrust::distance(v_ssi->begin(), it)); } void Proj::update_ssj_gpu(){ int simstep; CHECK(_glv.geti("simstep", simstep)); CONST_DEVICE_VECTOR(int8_t, *v_sj) = _sj->gpu_vector(); _ssj->resize(_dim_hcu*_dim_mcu); DEVICE_VECTOR(int, *v_ssj) = _ssj->mutable_gpu_vector(); auto it = copy_if( #ifndef CUDA_VERSION_LEGACY thrust::hip::par.on(_stream), #endif thrust::make_counting_iterator<int>(0), thrust::make_counting_iterator<int>(_dim_hcu*_dim_mcu), v_sj->begin(), v_ssj->begin(), _1>0); _ssj->resize(thrust::distance(v_ssj->begin(), it)); } */ void Proj::update_ssi_gpu(){ _ssi->resize(_siq->size()); const int8_t* ptr_siq = _siq->gpu_data(); int *ptr_ssi = _ssi->mutable_gpu_data(); int global_size_host; int *ptr_global_size_device; hipMalloc (&ptr_global_size_device, sizeof(int)); hipMemset (ptr_global_size_device, 0, sizeof(int)); hipLaunchKernelGGL(( update_ss_kernel_gpu), dim3(GSBN_GET_BLOCKS(_ssi->size())), dim3(GSBN_GET_THREADS(_ssi->size())), GSBN_CUDA_NUM_THREADS*sizeof(int), _stream, _ssi->size(), ptr_siq, ptr_ssi, ptr_global_size_device ); CUDA_POST_KERNEL_CHECK; hipMemcpyAsync(&global_size_host, ptr_global_size_device, sizeof(int), hipMemcpyDeviceToHost, _stream); _ssi->resize(global_size_host); } void Proj::update_ssj_gpu(){ _ssj->resize(_dim_hcu*_dim_mcu); int simstep; CHECK(_glv.geti("simstep", simstep)); const int8_t* ptr_sj = _sj->gpu_data(); int *ptr_ssj = _ssj->mutable_gpu_data(); int global_size_host; int *ptr_global_size_device; hipMalloc (&ptr_global_size_device, sizeof(int)); hipMemset (ptr_global_size_device, 0, sizeof(int)); hipLaunchKernelGGL(( update_ss_kernel_gpu), dim3(GSBN_GET_BLOCKS(_ssj->size())), dim3(GSBN_GET_THREADS(_ssj->size())), GSBN_CUDA_NUM_THREADS*sizeof(int), _stream, _ssj->size(), ptr_sj, ptr_ssj, ptr_global_size_device ); CUDA_POST_KERNEL_CHECK; hipMemcpyAsync(&global_size_host, ptr_global_size_device, sizeof(int), hipMemcpyDeviceToHost, _stream); _ssj->resize(global_size_host); } void Proj::update_que_gpu(){ const int *ptr_ii = _ii->gpu_data(); const int *ptr_di = _di->gpu_data(); const int8_t *ptr_si = _si->gpu_data(); int *ptr_qi = _qi->mutable_gpu_data(); int8_t *ptr_siq = _siq->mutable_gpu_data(); hipLaunchKernelGGL(( update_que_kernel_gpu), dim3(GSBN_GET_BLOCKS(_dim_hcu* 
_dim_conn)), dim3(GSBN_GET_THREADS(_dim_hcu* _dim_conn)), 0, 0, _dim_hcu * _dim_conn, ptr_ii, ptr_di, ptr_si, ptr_qi, ptr_siq ); CUDA_POST_KERNEL_CHECK; } //__global__ void update_full_kernel_gpu( // int dim_conn, // int dim_mcu, // float *ptr_pi, // float *ptr_ei, // float *ptr_zi, // int *ptr_ti, // const float *ptr_pj, // float *ptr_pij, // float *ptr_eij, // float *ptr_zi2, // float *ptr_zj2, // int *ptr_tij, // float *ptr_wij, // int simstep, // float kp, // float ke, // float kzi, // float kzj, // float wgain, // float eps, // float eps2 //){ // int i=blockIdx.y*gridDim.x+blockIdx.x; // int j=threadIdx.x; // // __shared__ float sh_pi; // if(j==0){ // float pi = ptr_pi[i]; // float zi = ptr_zi[i]; // int ti = ptr_ti[i]; // int pdt = simstep - ti; // if(pdt>0){ // float ei = ptr_ei[i]; // pi = (pi - ((ei*kp*kzi - ei*ke*kp + ke*kp*zi)/(ke - kp) + // (ke*kp*zi)/(kp - kzi))/(ke - kzi))/exp(kp*pdt) + // ((exp(kp*pdt - ke*pdt)*(ei*kp*kzi - ei*ke*kp + ke*kp*zi))/(ke - kp) + // (ke*kp*zi*exp(kp*pdt - kzi*pdt))/(kp - kzi))/(exp(kp*pdt)*(ke - kzi)); // ei = (ei - (ke*zi)/(ke - kzi))/exp(ke*pdt) + // (ke*zi*exp(ke*pdt - kzi*pdt))/(exp(ke*pdt)*(ke - kzi)); // zi = zi*exp(-kzi*pdt); // ti = simstep; // // ptr_pi[i] = pi; // ptr_ei[i] = ei; // ptr_zi[i] = zi; // ptr_ti[i] = ti; // } // sh_pi = pi; // } // __syncthreads(); // // int index = i*dim_mcu+j; // // int tij = ptr_tij[index]; // float zi2 = ptr_zi2[index]; // int pdt = simstep - tij; // if(pdt<=0){ // ptr_tij[index]=simstep; // }else{ // float pij = ptr_pij[index]; // float eij = ptr_eij[index]; // float zj2 = ptr_zj2[index]; // // pij = (pij + ((eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2)/(ke - kp) - // (ke*kp*zi2*zj2)/(kzi - kp + kzj))/(kzi - ke + kzj))/exp(kp*pdt) - // ((exp(kp*pdt - ke*pdt)*(eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2))/(ke - kp) - // (ke*kp*zi2*zj2*exp(kp*pdt - kzi*pdt - kzj*pdt))/ // (kzi - kp + kzj))/(exp(kp*pdt)*(kzi - ke + kzj)); // eij = (eij + (ke*zi2*zj2)/(kzi - ke + kzj))/exp(ke*pdt) - // (ke*zi2*zj2)/(exp(kzi*pdt)*exp(kzj*pdt)*(kzi - ke + kzj)); // zi2 = zi2*exp(-kzi*pdt); // zj2 = zj2*exp(-kzj*pdt); // tij = simstep; // // ptr_pij[index] = pij; // ptr_eij[index] = eij; // ptr_zi2[index] = zi2; // ptr_zj2[index] = zj2; // ptr_tij[index] = tij; // // // update wij and epsc // float wij; // if(kp){ // float pi = sh_pi; // float pj = ptr_pj[i/dim_conn*dim_mcu + j]; // /* // * Wij calculation: Original // */ // wij = wgain * log((pij + eps2)/((pi + eps)*(pj + eps))); // /* // * Wij calculation: Modified // */ // /* // if(pi<eps || pj<eps){ // wij=0; // }else{ // wij = wgain * log(pij/(pi*pj)); // } // */ // ptr_wij[index] = wij; // // } // } //} //__global__ void update_j_kernel_gpu( // int n, // const int8_t *ptr_sj, // float *ptr_pj, // float *ptr_ej, // float *ptr_zj, // float *ptr_bj, // float *ptr_epsc, // float kp, // float ke, // float kzj, // float kzi, // float kepsc, // float kftj, // float bgain, // float eps //){ // CUDA_KERNEL_LOOP(idx, n){ // float pj = ptr_pj[idx]; // float ej = ptr_ej[idx]; // float zj = ptr_zj[idx]; // int sj = ptr_sj[idx]; // // ptr_epsc[idx] *= (1-kepsc); // // pj += (ej - pj)*kp; // ej += (zj - ej)*ke; // zj *= (1-kzj); // if(sj>0){ // zj += kftj; // } // // if(kp){ // float bj; // /* // * Wij calculation: Original // */ // bj = bgain * log(pj); // /* // * Wij calculation: Modified // */ // /* // if(pj<eps){ // bj = bgain * log(eps); // }else{ // bj = bgain * log(pj); // } // */ // ptr_bj[idx]=bj; // } // ptr_pj[idx] = pj; // ptr_ej[idx] = ej; // ptr_zj[idx] = 
zj; // } //} //__global__ void update_row_kernel_gpu( // int dim_conn, // int dim_mcu, // const int *ptr_ssi, // float *ptr_pi, // float *ptr_ei, // float *ptr_zi, // int *ptr_ti, // const float *ptr_pj, // float *ptr_pij, // float *ptr_eij, // float *ptr_zi2, // float *ptr_zj2, // int *ptr_tij, // float* ptr_wij, // float* ptr_epsc, // int simstep, // float kp, // float ke, // float kzi, // float kzj, // float kfti, // float wgain, // float eps, // float eps2 //){ // int i = blockIdx.x; // int j = threadIdx.x; // int row = ptr_ssi[i]; // int col = j; // int index = row*dim_mcu+col; // // __shared__ float sh_pi; // // if(j==0){ // float pi = ptr_pi[row]; // float zi = ptr_zi[row]; // int ti = ptr_ti[row]; // int pdt = simstep - ti; // if(pdt<=0){ // ptr_zi[row] += kfti; // ptr_ti[row] = simstep; // }else{ // float ei = ptr_ei[row]; // // pi = (pi - ((ei*kp*kzi - ei*ke*kp + ke*kp*zi)/(ke - kp) + // (ke*kp*zi)/(kp - kzi))/(ke - kzi))/exp(kp*pdt) + // ((exp(kp*pdt - ke*pdt)*(ei*kp*kzi - ei*ke*kp + ke*kp*zi))/(ke - kp) + // (ke*kp*zi*exp(kp*pdt - kzi*pdt))/(kp - kzi))/(exp(kp*pdt)*(ke - kzi)); // ei = (ei - (ke*zi)/(ke - kzi))/exp(ke*pdt) + // (ke*zi*exp(ke*pdt - kzi*pdt))/(exp(ke*pdt)*(ke - kzi)); // zi = zi*exp(-kzi*pdt) + kfti; // ti = simstep; // ptr_pi[row] = pi; // ptr_ei[row] = ei; // ptr_zi[row] = zi; // ptr_ti[row] = ti; // } // sh_pi = pi; // } // // __syncthreads(); // // float pij = ptr_pij[index]; // int tij = ptr_tij[index]; // float zi2 = ptr_zi2[index]; // int pdt = simstep - tij; // if(pdt<=0){ // ptr_zi2[index] += kfti; // ptr_tij[index] = simstep; // }else{ // float eij = ptr_eij[index]; // float zj2 = ptr_zj2[index]; // // pij = (pij + ((eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2)/(ke - kp) - // (ke*kp*zi2*zj2)/(kzi - kp + kzj))/(kzi - ke + kzj))/exp(kp*pdt) - // ((exp(kp*pdt - ke*pdt)*(eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2))/(ke - kp) - // (ke*kp*zi2*zj2*exp(kp*pdt - kzi*pdt - kzj*pdt))/ // (kzi - kp + kzj))/(exp(kp*pdt)*(kzi - ke + kzj)); // eij = (eij + (ke*zi2*zj2)/(kzi - ke + kzj))/exp(ke*pdt) - // (ke*zi2*zj2)/(exp(kzi*pdt)*exp(kzj*pdt)*(kzi - ke + kzj)); // zi2 = zi2*exp(-kzi*pdt)+kfti; // zj2 = zj2*exp(-kzj*pdt); // tij = simstep; // // ptr_pij[index] = pij; // ptr_eij[index] = eij; // ptr_zi2[index] = zi2; // ptr_zj2[index] = zj2; // ptr_tij[index] = tij; // // float wij; // int idx_hcu = row / dim_conn; // int idx_mcu = idx_hcu * dim_mcu + j; // // if(kp){ // float pi = sh_pi; // float pj = ptr_pj[idx_mcu]; // /* // * Wij calculation: Original // */ // wij = wgain * log((pij + eps2)/((pi + eps)*(pj + eps))); // /* // * Wij calculation: Modified // */ // /* // if(pi<eps || pj<eps){ // wij=0; // }else{ // wij = wgain * log(pij/(pi*pj)); // } // */ // ptr_wij[index] = wij; // }else{ // wij = ptr_wij[index]; // } // // atomicAdd(&ptr_epsc[idx_mcu], wij); // } //} //__global__ void update_col_kernel_gpu( // int n, // int active_col_num, // int dim_conn, // int dim_mcu, // const int *ptr_ii, // const int *ptr_ssj, // float *ptr_pij, // float *ptr_eij, // float *ptr_zi2, // float *ptr_zj2, // int *ptr_tij, // int simstep, // float kp, // float ke, // float kzi, // float kzj, // float kftj //){ // CUDA_KERNEL_LOOP(idx, n){ // int i = idx/active_col_num; // int j = idx%active_col_num; // int row = ptr_ssj[j]/dim_mcu*dim_conn+i; // if(ptr_ii[row]<0){ // return; // } // int col = ptr_ssj[j]%dim_mcu; // int index = row*dim_mcu+col; // // int tij = ptr_tij[index]; // float zj2 = ptr_zj2[index]; // int pdt = simstep - tij; // if(pdt<=0){ // zj2 += kftj; 
// ptr_zj2[index]=zj2; // ptr_tij[index]=simstep; // }else{ // float pij = ptr_pij[index]; // float eij = ptr_eij[index]; // float zi2 = ptr_zi2[index]; // // pij = (pij + ((eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2)/(ke - kp) - // (ke*kp*zi2*zj2)/(kzi - kp + kzj))/(kzi - ke + kzj))/exp(kp*pdt) - // ((exp(kp*pdt - ke*pdt)*(eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2))/(ke - kp) - // (ke*kp*zi2*zj2*exp(kp*pdt - kzi*pdt - kzj*pdt))/ // (kzi - kp + kzj))/(exp(kp*pdt)*(kzi - ke + kzj)); // eij = (eij + (ke*zi2*zj2)/(kzi - ke + kzj))/exp(ke*pdt) - // (ke*zi2*zj2)/(exp(kzi*pdt)*exp(kzj*pdt)*(kzi - ke + kzj)); // zi2 = zi2*exp(-kzi*pdt); // zj2 = zj2*exp(-kzj*pdt)+kftj; // tij = simstep; // // ptr_pij[index] = pij; // ptr_eij[index] = eij; // ptr_zi2[index] = zi2; // ptr_zj2[index] = zj2; // ptr_tij[index] = tij; // } // } //} //void Proj::update_full_gpu(){ // int simstep; // float prn; // float old_prn; // CHECK(_glv.geti("simstep", simstep)); // CHECK(_glv.getf("prn", prn)); // CHECK(_glv.getf("old-prn", old_prn)); // if(old_prn!=prn){ // float *ptr_pi = _pi->mutable_gpu_data(); // float *ptr_ei = _ei->mutable_gpu_data(); // float *ptr_zi = _zi->mutable_gpu_data(); // int *ptr_ti = _ti->mutable_gpu_data(); // const float *ptr_pj = _pj->gpu_data(); // float *ptr_pij = _pij->mutable_gpu_data(); // float *ptr_eij = _eij->mutable_gpu_data(); // float *ptr_zi2 = _zi2->mutable_gpu_data(); // float *ptr_zj2 = _zj2->mutable_gpu_data(); // int *ptr_tij = _tij->mutable_gpu_data(); // float *ptr_wij = _wij->mutable_gpu_data(); // const dim3 GRID_SIZE(_dim_conn, _dim_hcu); // update_full_kernel_gpu<<<GRID_SIZE, _dim_mcu, 0, _stream>>>( // _dim_conn, // _dim_mcu, // ptr_pi, // ptr_ei, // ptr_zi, // ptr_ti, // ptr_pj, // ptr_pij, // ptr_eij, // ptr_zi2, // ptr_zj2, // ptr_tij, // ptr_wij, // simstep-1, // _taupdt*old_prn, // _tauedt, // _tauzidt, // _tauzjdt, // _wgain, // _eps, // _eps2 // ); // CUDA_POST_KERNEL_CHECK; // } //} //void Proj::update_j_gpu(){ // float prn; // CHECK(_glv.getf("prn", prn)); // float *ptr_pj = _pj->mutable_gpu_data(); // float *ptr_ej = _ej->mutable_gpu_data(); // float *ptr_zj = _zj->mutable_gpu_data(); // float *ptr_bj = _bj->mutable_gpu_data()+_proj_in_pop*_dim_hcu*_dim_mcu; // float *ptr_epsc = _epsc->mutable_gpu_data()+_proj_in_pop*_dim_hcu*_dim_mcu; // const int8_t *ptr_sj = _sj->gpu_data(); // update_j_kernel_gpu<<<GSBN_GET_BLOCKS(_dim_hcu*_dim_mcu), GSBN_GET_THREADS(_dim_hcu*_dim_mcu), 0, _stream>>>( // _dim_hcu*_dim_mcu, // ptr_sj, // ptr_pj, // ptr_ej, // ptr_zj, // ptr_bj, // ptr_epsc, // _taupdt*prn, // _tauedt, // _tauzjdt, // _tauzidt, // _tauepscdt, // _kftj, // _bgain, // _eps // ); // CUDA_POST_KERNEL_CHECK; //} //__global__ void update_siq_kernel_gpu( // int n, // const int *ptr_ii, // const int *ptr_di, // const int8_t *ptr_si, // int *ptr_qi, // float *ptr_siq //){ // CUDA_KERNEL_LOOP(i, n){ // int ii=ptr_ii[i]; // if(ii>=0){ // int32_t qi = ptr_qi[i]; // qi >>= 1; // ptr_siq[i] = float(qi & 0x01); // // int8_t spk = ptr_si[ii]; // if(spk>0){ // qi |= (0x01 << ptr_di[i]); // } // ptr_qi[i]=qi; // } // } //} //void Proj::update_ss_gpu(){ // const int *ptr_ii = _ii->gpu_data(); // const int *ptr_di = _di->gpu_data(); // const int8_t *ptr_si = _si->gpu_data(); // int *ptr_qi = _qi->mutable_gpu_data(); // float *ptr_siq = _siq->mutable_gpu_data(); // // update_siq_kernel_gpu<<<GSBN_GET_BLOCKS(_dim_hcu* _dim_conn), GSBN_GET_THREADS(_dim_hcu* _dim_conn), 0, _stream>>>( // _dim_hcu * _dim_conn, // ptr_ii, // ptr_di, // ptr_si, // ptr_qi, // ptr_siq 
// ); // CUDA_POST_KERNEL_CHECK; // // DEVICE_VECTOR_ITERATOR(int, it); // // // get active in spike // CONST_DEVICE_VECTOR(float, *v_siq) = _siq->gpu_vector(); // DEVICE_VECTOR(int, *v_ssi) = _ssi->mutable_gpu_vector(); // v_ssi->resize(v_siq->size()); // it = copy_if( // #ifndef CUDA_VERSION_LEGACY // thrust::hip::par.on(_stream), // #endif // make_counting_iterator<int>(0), // make_counting_iterator<int>(v_siq->size()), // v_siq->begin(), // v_ssi->begin(), // _1>0); // v_ssi->resize(thrust::distance(v_ssi->begin(), it)); // // // get active out spike // CONST_DEVICE_VECTOR(int8_t, *v_sj) = _sj->gpu_vector(); // DEVICE_VECTOR(int, *v_ssj) = _ssj->mutable_gpu_vector(); // v_ssj->resize(v_sj->size()); // it = copy_if( // #ifndef CUDA_VERSION_LEGACY // thrust::hip::par.on(_stream), // #endif // make_counting_iterator<int>(0), // make_counting_iterator<int>(v_sj->size()), // v_sj->begin(), // v_ssj->begin(), // _1>0); // v_ssj->resize(thrust::distance(v_ssj->begin(), it)); //} //void Proj::update_row_gpu(){ // int active_row_num = _ssi->gpu_vector()->size(); // if(active_row_num<=0){ // return; // } // // int simstep; // float prn; // CHECK(_glv.geti("simstep", simstep)); // CHECK(_glv.getf("prn", prn)); // // float *ptr_pi = _pi->mutable_gpu_data(); // float *ptr_ei = _ei->mutable_gpu_data(); // float *ptr_zi = _zi->mutable_gpu_data(); // int *ptr_ti = _ti->mutable_gpu_data(); // const float *ptr_pj = _pj->gpu_data(); // float *ptr_pij = _pij->mutable_gpu_data(); // float *ptr_eij = _eij->mutable_gpu_data(); // float *ptr_zi2 = _zi2->mutable_gpu_data(); // float *ptr_zj2 = _zj2->mutable_gpu_data(); // int *ptr_tij = _tij->mutable_gpu_data(); // float *ptr_wij = _wij->mutable_gpu_data(); // float *ptr_epsc = _epsc->mutable_gpu_data()+ _proj_in_pop * _dim_hcu * _dim_mcu; // const float *ptr_siq = _siq->gpu_data(); // // const int *ptr_ssi = _ssi->gpu_data(); //hipLaunchKernelGGL(( update_row_kernel_gpu), dim3(active_row_num), dim3(_dim_mcu), 0, _stream, // _dim_conn, // _dim_mcu, // ptr_ssi, // ptr_pi, // ptr_ei, // ptr_zi, // ptr_ti, // ptr_pj, // ptr_pij, // ptr_eij, // ptr_zi2, // ptr_zj2, // ptr_tij, // ptr_wij, // ptr_epsc, // simstep, // _taupdt*prn, // _tauedt, // _tauzidt, // _tauzjdt, // _kfti, // _wgain, // _eps, // _eps2 // ); // CUDA_POST_KERNEL_CHECK; //} //void Proj::update_col_gpu(){ // int active_col_num = _ssj->gpu_vector()->size(); // if(active_col_num<=0){ // return; // } // // int simstep; // float prn; // CHECK(_glv.geti("simstep", simstep)); // CHECK(_glv.getf("prn", prn)); // // float *ptr_pij = _pij->mutable_gpu_data(); // float *ptr_eij = _eij->mutable_gpu_data(); // float *ptr_zi2 = _zi2->mutable_gpu_data(); // float *ptr_zj2 = _zj2->mutable_gpu_data(); // int *ptr_tij = _tij->mutable_gpu_data(); // // const int *ptr_ii = _ii->gpu_data(); // const int *ptr_ssj = _ssj->gpu_data(); // //hipLaunchKernelGGL(( update_col_kernel_gpu), dim3(GSBN_GET_BLOCKS(_dim_conn*active_col_num)), dim3(GSBN_GET_THREADS(_dim_conn*active_col_num)), 0, _stream, // _dim_conn*active_col_num, // active_col_num, // _dim_conn, // _dim_mcu, // ptr_ii, // ptr_ssj, // ptr_pij, // ptr_eij, // ptr_zi2, // ptr_zj2, // ptr_tij, // simstep, // _taupdt * prn, // _tauedt, // _tauzidt, // _tauzjdt, // _kftj // ); // CUDA_POST_KERNEL_CHECK; //} } } #endif
2e06b7f4a89b62c9bcb768d3b83400b6dca68a7c.cu
#include "gsbn/procedures/ProcUpdLazy/Proj.hpp" #ifndef CPU_ONLY //#include <thrust/copy.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/functional.h> using namespace thrust::placeholders; namespace gsbn{ namespace proc_upd_lazy{ __global__ void update_all_kernel_gpu( int dim_conn, int dim_mcu, float *ptr_pi, float *ptr_ei, float *ptr_zi, int *ptr_ti, const float *ptr_pj, float *ptr_pij, float *ptr_eij, float *ptr_zi2, float *ptr_zj2, int *ptr_tij, float *ptr_wij, int simstep, float kp, float ke, float kzi, float kzj, float wgain, float eps, float eps2 ){ int i=blockIdx.y*gridDim.x+blockIdx.x; int j=threadIdx.x; __shared__ float sh_pi; if(j==0){ float pi = ptr_pi[i]; float zi = ptr_zi[i]; int ti = ptr_ti[i]; int pdt = simstep - ti; if(pdt>0){ float ei = ptr_ei[i]; pi = (pi - ((ei*kp*kzi - ei*ke*kp + ke*kp*zi)/(ke - kp) + (ke*kp*zi)/(kp - kzi))/(ke - kzi))/exp(kp*pdt) + ((exp(kp*pdt - ke*pdt)*(ei*kp*kzi - ei*ke*kp + ke*kp*zi))/(ke - kp) + (ke*kp*zi*exp(kp*pdt - kzi*pdt))/(kp - kzi))/(exp(kp*pdt)*(ke - kzi)); ei = (ei - (ke*zi)/(ke - kzi))/exp(ke*pdt) + (ke*zi*exp(ke*pdt - kzi*pdt))/(exp(ke*pdt)*(ke - kzi)); zi = zi*exp(-kzi*pdt); ti = simstep; ptr_pi[i] = pi; ptr_ei[i] = ei; ptr_zi[i] = zi; ptr_ti[i] = ti; } sh_pi = pi; } __syncthreads(); int index = i*dim_mcu+j; int tij = ptr_tij[index]; float zi2 = ptr_zi2[index]; int pdt = simstep - tij; if(pdt<=0){ ptr_tij[index]=simstep; }else{ float pij = ptr_pij[index]; float eij = ptr_eij[index]; float zj2 = ptr_zj2[index]; pij = (pij + ((eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2)/(ke - kp) - (ke*kp*zi2*zj2)/(kzi - kp + kzj))/(kzi - ke + kzj))/exp(kp*pdt) - ((exp(kp*pdt - ke*pdt)*(eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2))/(ke - kp) - (ke*kp*zi2*zj2*exp(kp*pdt - kzi*pdt - kzj*pdt))/ (kzi - kp + kzj))/(exp(kp*pdt)*(kzi - ke + kzj)); eij = (eij + (ke*zi2*zj2)/(kzi - ke + kzj))/exp(ke*pdt) - (ke*zi2*zj2)/(exp(kzi*pdt)*exp(kzj*pdt)*(kzi - ke + kzj)); zi2 = zi2*exp(-kzi*pdt); zj2 = zj2*exp(-kzj*pdt); tij = simstep; ptr_pij[index] = pij; ptr_eij[index] = eij; ptr_zi2[index] = zi2; ptr_zj2[index] = zj2; ptr_tij[index] = tij; // update wij and epsc float wij; if(kp){ float pi = sh_pi; float pj = ptr_pj[i/dim_conn*dim_mcu + j]; /* * Wij calculation: Original */ wij = wgain * log((pij + eps2)/((pi + eps)*(pj + eps))); /* * Wij calculation: Modified */ /* if(pi<eps || pj<eps){ wij=0; }else{ wij = wgain * log(pij/(pi*pj)); } */ ptr_wij[index] = wij; } } } __global__ void update_jxx_kernel_gpu( int n, const int8_t *ptr_sj, float *ptr_pj, float *ptr_ej, float *ptr_zj, float *ptr_bj, float *ptr_epsc, float kp, float ke, float kzj, float kzi, float kepsc, float kftj, float bgain, float eps ){ CUDA_KERNEL_LOOP(idx, n){ float pj = ptr_pj[idx]; float ej = ptr_ej[idx]; float zj = ptr_zj[idx]; int8_t sj = ptr_sj[idx]; ptr_epsc[idx] *= (1-kepsc); pj += (ej - pj)*kp; ej += (zj - ej)*ke; zj *= (1-kzj); if(sj>0){ zj += kftj; } if(kp){ float bj = bgain * log(pj + eps); ptr_bj[idx]=bj; } ptr_pj[idx] = pj; ptr_ej[idx] = ej; ptr_zj[idx] = zj; } } __global__ void update_row_kernel_gpu( int dim_conn, int dim_mcu, const int *ptr_ssi, float *ptr_pi, float *ptr_ei, float *ptr_zi, int *ptr_ti, const float *ptr_pj, float *ptr_pij, float *ptr_eij, float *ptr_zi2, float *ptr_zj2, int *ptr_tij, float* ptr_wij, float* ptr_epsc, int simstep, float kp, float ke, float kzi, float kzj, float kfti, float wgain, float eps, float eps2 ){ int i = blockIdx.x; int j = threadIdx.x; int row = ptr_ssi[i]; int col = j; int index = row*dim_mcu+col; __shared__ 
float sh_pi; if(j==0){ float pi = ptr_pi[row]; float zi = ptr_zi[row]; int ti = ptr_ti[row]; int pdt = simstep - ti; if(pdt<=0){ ptr_zi[row] += kfti; ptr_ti[row] = simstep; }else{ float ei = ptr_ei[row]; pi = (pi - ((ei*kp*kzi - ei*ke*kp + ke*kp*zi)/(ke - kp) + (ke*kp*zi)/(kp - kzi))/(ke - kzi))/exp(kp*pdt) + ((exp(kp*pdt - ke*pdt)*(ei*kp*kzi - ei*ke*kp + ke*kp*zi))/(ke - kp) + (ke*kp*zi*exp(kp*pdt - kzi*pdt))/(kp - kzi))/(exp(kp*pdt)*(ke - kzi)); ei = (ei - (ke*zi)/(ke - kzi))/exp(ke*pdt) + (ke*zi*exp(ke*pdt - kzi*pdt))/(exp(ke*pdt)*(ke - kzi)); zi = zi*exp(-kzi*pdt) + kfti; ti = simstep; ptr_pi[row] = pi; ptr_ei[row] = ei; ptr_zi[row] = zi; ptr_ti[row] = ti; } sh_pi = pi; } __syncthreads(); float pij = ptr_pij[index]; int tij = ptr_tij[index]; float zi2 = ptr_zi2[index]; int pdt = simstep - tij; if(pdt<=0){ ptr_zi2[index] += kfti; ptr_tij[index] = simstep; }else{ float eij = ptr_eij[index]; float zj2 = ptr_zj2[index]; pij = (pij + ((eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2)/(ke - kp) - (ke*kp*zi2*zj2)/(kzi - kp + kzj))/(kzi - ke + kzj))/exp(kp*pdt) - ((exp(kp*pdt - ke*pdt)*(eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2))/(ke - kp) - (ke*kp*zi2*zj2*exp(kp*pdt - kzi*pdt - kzj*pdt))/ (kzi - kp + kzj))/(exp(kp*pdt)*(kzi - ke + kzj)); eij = (eij + (ke*zi2*zj2)/(kzi - ke + kzj))/exp(ke*pdt) - (ke*zi2*zj2)/(exp(kzi*pdt)*exp(kzj*pdt)*(kzi - ke + kzj)); zi2 = zi2*exp(-kzi*pdt)+kfti; zj2 = zj2*exp(-kzj*pdt); tij = simstep; ptr_pij[index] = pij; ptr_eij[index] = eij; ptr_zi2[index] = zi2; ptr_zj2[index] = zj2; ptr_tij[index] = tij; float wij; int idx_hcu = row / dim_conn; int idx_mcu = idx_hcu * dim_mcu + j; if(kp){ float pi = sh_pi; float pj = ptr_pj[idx_mcu]; /* * Wij calculation: Original */ wij = wgain * log((pij + eps2)/((pi + eps)*(pj + eps))); /* * Wij calculation: Modified */ /* if(pi<eps || pj<eps){ wij=0; }else{ wij = wgain * log(pij/(pi*pj)); } */ ptr_wij[index] = wij; }else{ wij = ptr_wij[index]; } atomicAdd(&ptr_epsc[idx_mcu], wij); } } __global__ void update_col_kernel_gpu( int n, int active_col_num, int dim_conn, int dim_mcu, const int *ptr_ii, const int *ptr_ssj, float *ptr_pij, float *ptr_eij, float *ptr_zi2, float *ptr_zj2, int *ptr_tij, int simstep, float kp, float ke, float kzi, float kzj, float kftj ){ CUDA_KERNEL_LOOP(idx, n){ int j = idx/dim_conn; int i = idx%dim_conn; int row = ptr_ssj[j]/dim_mcu*dim_conn+i; if(ptr_ii[row]<0){ return; } int col = ptr_ssj[j]%dim_mcu; int index = row*dim_mcu+col; int tij = ptr_tij[index]; float zj2 = ptr_zj2[index]; int pdt = simstep - tij; if(pdt<=0){ zj2 += kftj; ptr_zj2[index]=zj2; ptr_tij[index]=simstep; }else{ float pij = ptr_pij[index]; float eij = ptr_eij[index]; float zi2 = ptr_zi2[index]; pij = (pij + ((eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2)/(ke - kp) - (ke*kp*zi2*zj2)/(kzi - kp + kzj))/(kzi - ke + kzj))/exp(kp*pdt) - ((exp(kp*pdt - ke*pdt)*(eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2))/(ke - kp) - (ke*kp*zi2*zj2*exp(kp*pdt - kzi*pdt - kzj*pdt))/ (kzi - kp + kzj))/(exp(kp*pdt)*(kzi - ke + kzj)); eij = (eij + (ke*zi2*zj2)/(kzi - ke + kzj))/exp(ke*pdt) - (ke*zi2*zj2)/(exp(kzi*pdt)*exp(kzj*pdt)*(kzi - ke + kzj)); zi2 = zi2*exp(-kzi*pdt); zj2 = zj2*exp(-kzj*pdt)+kftj; tij = simstep; ptr_pij[index] = pij; ptr_eij[index] = eij; ptr_zi2[index] = zi2; ptr_zj2[index] = zj2; ptr_tij[index] = tij; } } } void Proj::update_all_gpu(){ int simstep; float prn; float old_prn; CHECK(_glv.geti("simstep", simstep)); CHECK(_glv.getf("prn", prn)); CHECK(_glv.getf("old-prn", old_prn)); if(old_prn!=prn){ float *ptr_pi = 
_pi->mutable_gpu_data(); float *ptr_ei = _ei->mutable_gpu_data(); float *ptr_zi = _zi->mutable_gpu_data(); int *ptr_ti = _ti->mutable_gpu_data(); const float *ptr_pj = _pj->gpu_data(); float *ptr_pij = _pij->mutable_gpu_data(); float *ptr_eij = _eij->mutable_gpu_data(); float *ptr_zi2 = _zi2->mutable_gpu_data(); float *ptr_zj2 = _zj2->mutable_gpu_data(); int *ptr_tij = _tij->mutable_gpu_data(); float *ptr_wij = _wij->mutable_gpu_data(); const dim3 GRID_SIZE(_dim_conn, _dim_hcu); update_all_kernel_gpu<<<GRID_SIZE, _dim_mcu, 0, _stream>>>( _dim_conn, _dim_mcu, ptr_pi, ptr_ei, ptr_zi, ptr_ti, ptr_pj, ptr_pij, ptr_eij, ptr_zi2, ptr_zj2, ptr_tij, ptr_wij, simstep-1, _taupdt*old_prn, _tauedt, _tauzidt, _tauzjdt, _wgain, _eps, _eps2 ); CUDA_POST_KERNEL_CHECK; } } void Proj::update_jxx_gpu(){ float prn; CHECK(_glv.getf("prn", prn)); float *ptr_pj = _pj->mutable_gpu_data(); float *ptr_ej = _ej->mutable_gpu_data(); float *ptr_zj = _zj->mutable_gpu_data(); float *ptr_bj = _bj->mutable_gpu_data()+_proj_in_pop*_dim_hcu*_dim_mcu; float *ptr_epsc = _epsc->mutable_gpu_data()+_proj_in_pop*_dim_hcu*_dim_mcu; const int8_t *ptr_sj = _sj->gpu_data(); CUDA_CHECK(cudaSetDevice(_device)); update_jxx_kernel_gpu<<<GSBN_GET_BLOCKS(_dim_hcu*_dim_mcu), GSBN_GET_THREADS(_dim_hcu*_dim_mcu), 0, _stream>>>( _dim_hcu*_dim_mcu, ptr_sj, ptr_pj, ptr_ej, ptr_zj, ptr_bj, ptr_epsc, _taupdt*prn, _tauedt, _tauzjdt, _tauzidt, _tauepscdt, _kftj, _bgain, _eps ); CUDA_POST_KERNEL_CHECK; } __global__ void update_ss_kernel_gpu ( int n, const int8_t *s, int *ss, int *global_size ){ extern __shared__ int shmem0[]; int i=threadIdx.x; __shared__ int size; __shared__ int global_index; CUDA_KERNEL_LOOP(idx, n){ if(i==0){ size = 0; } __syncthreads(); if(s[idx]>0){ int index = atomicInc((unsigned int*)(&size), 1024); shmem0[index] = idx; } __syncthreads(); if(i==0){ global_index = atomicAdd(global_size, size); } __syncthreads(); if(i<size){ ss[global_index+i] = shmem0[i]; } __syncthreads(); } } __global__ void update_que_kernel_gpu( int n, const int *ptr_ii, const int *ptr_di, const int8_t *ptr_si, int *ptr_qi, int8_t *ptr_siq ){ CUDA_KERNEL_LOOP(i, n){ int ii=ptr_ii[i]; if(ii>=0){ int32_t qi = ptr_qi[i]; ptr_siq[i] = int8_t(qi & 0x01); int8_t spk = ptr_si[ii]; if(spk>0){ qi |= (0x01 << ptr_di[i]); } qi >>= 1; ptr_qi[i]=qi; } } } void Proj::update_row_gpu(){ int active_row_num = _ssi->gpu_vector()->size(); if(active_row_num<=0){ return; } int simstep; float prn; CHECK(_glv.geti("simstep", simstep)); CHECK(_glv.getf("prn", prn)); float *ptr_pi = _pi->mutable_gpu_data(); float *ptr_ei = _ei->mutable_gpu_data(); float *ptr_zi = _zi->mutable_gpu_data(); int *ptr_ti = _ti->mutable_gpu_data(); const float *ptr_pj = _pj->gpu_data(); float *ptr_pij = _pij->mutable_gpu_data(); float *ptr_eij = _eij->mutable_gpu_data(); float *ptr_zi2 = _zi2->mutable_gpu_data(); float *ptr_zj2 = _zj2->mutable_gpu_data(); int *ptr_tij = _tij->mutable_gpu_data(); float *ptr_wij = _wij->mutable_gpu_data(); float *ptr_epsc = _epsc->mutable_gpu_data()+ _proj_in_pop * _dim_hcu * _dim_mcu; const int *ptr_ssi = _ssi->gpu_data(); update_row_kernel_gpu<<<active_row_num, _dim_mcu, 0, _stream>>>( _dim_conn, _dim_mcu, ptr_ssi, ptr_pi, ptr_ei, ptr_zi, ptr_ti, ptr_pj, ptr_pij, ptr_eij, ptr_zi2, ptr_zj2, ptr_tij, ptr_wij, ptr_epsc, simstep, _taupdt*prn, _tauedt, _tauzidt, _tauzjdt, _kfti, _wgain, _eps, _eps2 ); CUDA_POST_KERNEL_CHECK; } void Proj::update_col_gpu(){ int active_col_num = _ssj->gpu_vector()->size(); if(active_col_num<=0){ return; } int simstep; float prn; 
CHECK(_glv.geti("simstep", simstep)); CHECK(_glv.getf("prn", prn)); float *ptr_pij = _pij->mutable_gpu_data(); float *ptr_eij = _eij->mutable_gpu_data(); float *ptr_zi2 = _zi2->mutable_gpu_data(); float *ptr_zj2 = _zj2->mutable_gpu_data(); int *ptr_tij = _tij->mutable_gpu_data(); const int *ptr_ii = _ii->gpu_data(); const int *ptr_ssj = _ssj->gpu_data(); update_col_kernel_gpu<<<GSBN_GET_BLOCKS(_dim_conn*active_col_num), GSBN_GET_THREADS(_dim_conn*active_col_num), 0, _stream>>>( _dim_conn*active_col_num, active_col_num, _dim_conn, _dim_mcu, ptr_ii, ptr_ssj, ptr_pij, ptr_eij, ptr_zi2, ptr_zj2, ptr_tij, simstep, _taupdt * prn, _tauedt, _tauzidt, _tauzjdt, _kftj ); CUDA_POST_KERNEL_CHECK; } /* void Proj::update_ssi_gpu(){ CONST_DEVICE_VECTOR(int, *v_siq) = _siq->gpu_vector(); _ssi->resize(v_siq->size()); DEVICE_VECTOR(int, *v_ssi) = _ssi->mutable_gpu_vector(); auto it = copy_if( #ifndef CUDA_VERSION_LEGACY thrust::cuda::par.on(_stream), #endif thrust::make_counting_iterator<int>(0), thrust::make_counting_iterator<int>(v_siq->size()), v_siq->begin(), v_ssi->begin(), _1>0); _ssi->resize(thrust::distance(v_ssi->begin(), it)); } void Proj::update_ssj_gpu(){ int simstep; CHECK(_glv.geti("simstep", simstep)); CONST_DEVICE_VECTOR(int8_t, *v_sj) = _sj->gpu_vector(); _ssj->resize(_dim_hcu*_dim_mcu); DEVICE_VECTOR(int, *v_ssj) = _ssj->mutable_gpu_vector(); auto it = copy_if( #ifndef CUDA_VERSION_LEGACY thrust::cuda::par.on(_stream), #endif thrust::make_counting_iterator<int>(0), thrust::make_counting_iterator<int>(_dim_hcu*_dim_mcu), v_sj->begin(), v_ssj->begin(), _1>0); _ssj->resize(thrust::distance(v_ssj->begin(), it)); } */ void Proj::update_ssi_gpu(){ _ssi->resize(_siq->size()); const int8_t* ptr_siq = _siq->gpu_data(); int *ptr_ssi = _ssi->mutable_gpu_data(); int global_size_host; int *ptr_global_size_device; cudaMalloc (&ptr_global_size_device, sizeof(int)); cudaMemset (ptr_global_size_device, 0, sizeof(int)); update_ss_kernel_gpu<<<GSBN_GET_BLOCKS(_ssi->size()), GSBN_GET_THREADS(_ssi->size()), GSBN_CUDA_NUM_THREADS*sizeof(int), _stream>>>( _ssi->size(), ptr_siq, ptr_ssi, ptr_global_size_device ); CUDA_POST_KERNEL_CHECK; cudaMemcpyAsync(&global_size_host, ptr_global_size_device, sizeof(int), cudaMemcpyDeviceToHost, _stream); _ssi->resize(global_size_host); } void Proj::update_ssj_gpu(){ _ssj->resize(_dim_hcu*_dim_mcu); int simstep; CHECK(_glv.geti("simstep", simstep)); const int8_t* ptr_sj = _sj->gpu_data(); int *ptr_ssj = _ssj->mutable_gpu_data(); int global_size_host; int *ptr_global_size_device; cudaMalloc (&ptr_global_size_device, sizeof(int)); cudaMemset (ptr_global_size_device, 0, sizeof(int)); update_ss_kernel_gpu<<<GSBN_GET_BLOCKS(_ssj->size()), GSBN_GET_THREADS(_ssj->size()), GSBN_CUDA_NUM_THREADS*sizeof(int), _stream>>>( _ssj->size(), ptr_sj, ptr_ssj, ptr_global_size_device ); CUDA_POST_KERNEL_CHECK; cudaMemcpyAsync(&global_size_host, ptr_global_size_device, sizeof(int), cudaMemcpyDeviceToHost, _stream); _ssj->resize(global_size_host); } void Proj::update_que_gpu(){ const int *ptr_ii = _ii->gpu_data(); const int *ptr_di = _di->gpu_data(); const int8_t *ptr_si = _si->gpu_data(); int *ptr_qi = _qi->mutable_gpu_data(); int8_t *ptr_siq = _siq->mutable_gpu_data(); update_que_kernel_gpu<<<GSBN_GET_BLOCKS(_dim_hcu* _dim_conn), GSBN_GET_THREADS(_dim_hcu* _dim_conn)>>>( _dim_hcu * _dim_conn, ptr_ii, ptr_di, ptr_si, ptr_qi, ptr_siq ); CUDA_POST_KERNEL_CHECK; } //__global__ void update_full_kernel_gpu( // int dim_conn, // int dim_mcu, // float *ptr_pi, // float *ptr_ei, // float *ptr_zi, // int 
*ptr_ti, // const float *ptr_pj, // float *ptr_pij, // float *ptr_eij, // float *ptr_zi2, // float *ptr_zj2, // int *ptr_tij, // float *ptr_wij, // int simstep, // float kp, // float ke, // float kzi, // float kzj, // float wgain, // float eps, // float eps2 //){ // int i=blockIdx.y*gridDim.x+blockIdx.x; // int j=threadIdx.x; // // __shared__ float sh_pi; // if(j==0){ // float pi = ptr_pi[i]; // float zi = ptr_zi[i]; // int ti = ptr_ti[i]; // int pdt = simstep - ti; // if(pdt>0){ // float ei = ptr_ei[i]; // pi = (pi - ((ei*kp*kzi - ei*ke*kp + ke*kp*zi)/(ke - kp) + // (ke*kp*zi)/(kp - kzi))/(ke - kzi))/exp(kp*pdt) + // ((exp(kp*pdt - ke*pdt)*(ei*kp*kzi - ei*ke*kp + ke*kp*zi))/(ke - kp) + // (ke*kp*zi*exp(kp*pdt - kzi*pdt))/(kp - kzi))/(exp(kp*pdt)*(ke - kzi)); // ei = (ei - (ke*zi)/(ke - kzi))/exp(ke*pdt) + // (ke*zi*exp(ke*pdt - kzi*pdt))/(exp(ke*pdt)*(ke - kzi)); // zi = zi*exp(-kzi*pdt); // ti = simstep; // // ptr_pi[i] = pi; // ptr_ei[i] = ei; // ptr_zi[i] = zi; // ptr_ti[i] = ti; // } // sh_pi = pi; // } // __syncthreads(); // // int index = i*dim_mcu+j; // // int tij = ptr_tij[index]; // float zi2 = ptr_zi2[index]; // int pdt = simstep - tij; // if(pdt<=0){ // ptr_tij[index]=simstep; // }else{ // float pij = ptr_pij[index]; // float eij = ptr_eij[index]; // float zj2 = ptr_zj2[index]; // // pij = (pij + ((eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2)/(ke - kp) - // (ke*kp*zi2*zj2)/(kzi - kp + kzj))/(kzi - ke + kzj))/exp(kp*pdt) - // ((exp(kp*pdt - ke*pdt)*(eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2))/(ke - kp) - // (ke*kp*zi2*zj2*exp(kp*pdt - kzi*pdt - kzj*pdt))/ // (kzi - kp + kzj))/(exp(kp*pdt)*(kzi - ke + kzj)); // eij = (eij + (ke*zi2*zj2)/(kzi - ke + kzj))/exp(ke*pdt) - // (ke*zi2*zj2)/(exp(kzi*pdt)*exp(kzj*pdt)*(kzi - ke + kzj)); // zi2 = zi2*exp(-kzi*pdt); // zj2 = zj2*exp(-kzj*pdt); // tij = simstep; // // ptr_pij[index] = pij; // ptr_eij[index] = eij; // ptr_zi2[index] = zi2; // ptr_zj2[index] = zj2; // ptr_tij[index] = tij; // // // update wij and epsc // float wij; // if(kp){ // float pi = sh_pi; // float pj = ptr_pj[i/dim_conn*dim_mcu + j]; // /* // * Wij calculation: Original // */ // wij = wgain * log((pij + eps2)/((pi + eps)*(pj + eps))); // /* // * Wij calculation: Modified // */ // /* // if(pi<eps || pj<eps){ // wij=0; // }else{ // wij = wgain * log(pij/(pi*pj)); // } // */ // ptr_wij[index] = wij; // // } // } //} //__global__ void update_j_kernel_gpu( // int n, // const int8_t *ptr_sj, // float *ptr_pj, // float *ptr_ej, // float *ptr_zj, // float *ptr_bj, // float *ptr_epsc, // float kp, // float ke, // float kzj, // float kzi, // float kepsc, // float kftj, // float bgain, // float eps //){ // CUDA_KERNEL_LOOP(idx, n){ // float pj = ptr_pj[idx]; // float ej = ptr_ej[idx]; // float zj = ptr_zj[idx]; // int sj = ptr_sj[idx]; // // ptr_epsc[idx] *= (1-kepsc); // // pj += (ej - pj)*kp; // ej += (zj - ej)*ke; // zj *= (1-kzj); // if(sj>0){ // zj += kftj; // } // // if(kp){ // float bj; // /* // * Wij calculation: Original // */ // bj = bgain * log(pj); // /* // * Wij calculation: Modified // */ // /* // if(pj<eps){ // bj = bgain * log(eps); // }else{ // bj = bgain * log(pj); // } // */ // ptr_bj[idx]=bj; // } // ptr_pj[idx] = pj; // ptr_ej[idx] = ej; // ptr_zj[idx] = zj; // } //} //__global__ void update_row_kernel_gpu( // int dim_conn, // int dim_mcu, // const int *ptr_ssi, // float *ptr_pi, // float *ptr_ei, // float *ptr_zi, // int *ptr_ti, // const float *ptr_pj, // float *ptr_pij, // float *ptr_eij, // float *ptr_zi2, // float *ptr_zj2, // int 
*ptr_tij, // float* ptr_wij, // float* ptr_epsc, // int simstep, // float kp, // float ke, // float kzi, // float kzj, // float kfti, // float wgain, // float eps, // float eps2 //){ // int i = blockIdx.x; // int j = threadIdx.x; // int row = ptr_ssi[i]; // int col = j; // int index = row*dim_mcu+col; // // __shared__ float sh_pi; // // if(j==0){ // float pi = ptr_pi[row]; // float zi = ptr_zi[row]; // int ti = ptr_ti[row]; // int pdt = simstep - ti; // if(pdt<=0){ // ptr_zi[row] += kfti; // ptr_ti[row] = simstep; // }else{ // float ei = ptr_ei[row]; // // pi = (pi - ((ei*kp*kzi - ei*ke*kp + ke*kp*zi)/(ke - kp) + // (ke*kp*zi)/(kp - kzi))/(ke - kzi))/exp(kp*pdt) + // ((exp(kp*pdt - ke*pdt)*(ei*kp*kzi - ei*ke*kp + ke*kp*zi))/(ke - kp) + // (ke*kp*zi*exp(kp*pdt - kzi*pdt))/(kp - kzi))/(exp(kp*pdt)*(ke - kzi)); // ei = (ei - (ke*zi)/(ke - kzi))/exp(ke*pdt) + // (ke*zi*exp(ke*pdt - kzi*pdt))/(exp(ke*pdt)*(ke - kzi)); // zi = zi*exp(-kzi*pdt) + kfti; // ti = simstep; // ptr_pi[row] = pi; // ptr_ei[row] = ei; // ptr_zi[row] = zi; // ptr_ti[row] = ti; // } // sh_pi = pi; // } // // __syncthreads(); // // float pij = ptr_pij[index]; // int tij = ptr_tij[index]; // float zi2 = ptr_zi2[index]; // int pdt = simstep - tij; // if(pdt<=0){ // ptr_zi2[index] += kfti; // ptr_tij[index] = simstep; // }else{ // float eij = ptr_eij[index]; // float zj2 = ptr_zj2[index]; // // pij = (pij + ((eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2)/(ke - kp) - // (ke*kp*zi2*zj2)/(kzi - kp + kzj))/(kzi - ke + kzj))/exp(kp*pdt) - // ((exp(kp*pdt - ke*pdt)*(eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2))/(ke - kp) - // (ke*kp*zi2*zj2*exp(kp*pdt - kzi*pdt - kzj*pdt))/ // (kzi - kp + kzj))/(exp(kp*pdt)*(kzi - ke + kzj)); // eij = (eij + (ke*zi2*zj2)/(kzi - ke + kzj))/exp(ke*pdt) - // (ke*zi2*zj2)/(exp(kzi*pdt)*exp(kzj*pdt)*(kzi - ke + kzj)); // zi2 = zi2*exp(-kzi*pdt)+kfti; // zj2 = zj2*exp(-kzj*pdt); // tij = simstep; // // ptr_pij[index] = pij; // ptr_eij[index] = eij; // ptr_zi2[index] = zi2; // ptr_zj2[index] = zj2; // ptr_tij[index] = tij; // // float wij; // int idx_hcu = row / dim_conn; // int idx_mcu = idx_hcu * dim_mcu + j; // // if(kp){ // float pi = sh_pi; // float pj = ptr_pj[idx_mcu]; // /* // * Wij calculation: Original // */ // wij = wgain * log((pij + eps2)/((pi + eps)*(pj + eps))); // /* // * Wij calculation: Modified // */ // /* // if(pi<eps || pj<eps){ // wij=0; // }else{ // wij = wgain * log(pij/(pi*pj)); // } // */ // ptr_wij[index] = wij; // }else{ // wij = ptr_wij[index]; // } // // atomicAdd(&ptr_epsc[idx_mcu], wij); // } //} //__global__ void update_col_kernel_gpu( // int n, // int active_col_num, // int dim_conn, // int dim_mcu, // const int *ptr_ii, // const int *ptr_ssj, // float *ptr_pij, // float *ptr_eij, // float *ptr_zi2, // float *ptr_zj2, // int *ptr_tij, // int simstep, // float kp, // float ke, // float kzi, // float kzj, // float kftj //){ // CUDA_KERNEL_LOOP(idx, n){ // int i = idx/active_col_num; // int j = idx%active_col_num; // int row = ptr_ssj[j]/dim_mcu*dim_conn+i; // if(ptr_ii[row]<0){ // return; // } // int col = ptr_ssj[j]%dim_mcu; // int index = row*dim_mcu+col; // // int tij = ptr_tij[index]; // float zj2 = ptr_zj2[index]; // int pdt = simstep - tij; // if(pdt<=0){ // zj2 += kftj; // ptr_zj2[index]=zj2; // ptr_tij[index]=simstep; // }else{ // float pij = ptr_pij[index]; // float eij = ptr_eij[index]; // float zi2 = ptr_zi2[index]; // // pij = (pij + ((eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2)/(ke - kp) - // (ke*kp*zi2*zj2)/(kzi - kp + kzj))/(kzi - ke + 
kzj))/exp(kp*pdt) - // ((exp(kp*pdt - ke*pdt)*(eij*kp*kzi - eij*ke*kp + eij*kp*kzj + ke*kp*zi2*zj2))/(ke - kp) - // (ke*kp*zi2*zj2*exp(kp*pdt - kzi*pdt - kzj*pdt))/ // (kzi - kp + kzj))/(exp(kp*pdt)*(kzi - ke + kzj)); // eij = (eij + (ke*zi2*zj2)/(kzi - ke + kzj))/exp(ke*pdt) - // (ke*zi2*zj2)/(exp(kzi*pdt)*exp(kzj*pdt)*(kzi - ke + kzj)); // zi2 = zi2*exp(-kzi*pdt); // zj2 = zj2*exp(-kzj*pdt)+kftj; // tij = simstep; // // ptr_pij[index] = pij; // ptr_eij[index] = eij; // ptr_zi2[index] = zi2; // ptr_zj2[index] = zj2; // ptr_tij[index] = tij; // } // } //} //void Proj::update_full_gpu(){ // int simstep; // float prn; // float old_prn; // CHECK(_glv.geti("simstep", simstep)); // CHECK(_glv.getf("prn", prn)); // CHECK(_glv.getf("old-prn", old_prn)); // if(old_prn!=prn){ // float *ptr_pi = _pi->mutable_gpu_data(); // float *ptr_ei = _ei->mutable_gpu_data(); // float *ptr_zi = _zi->mutable_gpu_data(); // int *ptr_ti = _ti->mutable_gpu_data(); // const float *ptr_pj = _pj->gpu_data(); // float *ptr_pij = _pij->mutable_gpu_data(); // float *ptr_eij = _eij->mutable_gpu_data(); // float *ptr_zi2 = _zi2->mutable_gpu_data(); // float *ptr_zj2 = _zj2->mutable_gpu_data(); // int *ptr_tij = _tij->mutable_gpu_data(); // float *ptr_wij = _wij->mutable_gpu_data(); // const dim3 GRID_SIZE(_dim_conn, _dim_hcu); // update_full_kernel_gpu<<<GRID_SIZE, _dim_mcu, 0, _stream>>>( // _dim_conn, // _dim_mcu, // ptr_pi, // ptr_ei, // ptr_zi, // ptr_ti, // ptr_pj, // ptr_pij, // ptr_eij, // ptr_zi2, // ptr_zj2, // ptr_tij, // ptr_wij, // simstep-1, // _taupdt*old_prn, // _tauedt, // _tauzidt, // _tauzjdt, // _wgain, // _eps, // _eps2 // ); // CUDA_POST_KERNEL_CHECK; // } //} //void Proj::update_j_gpu(){ // float prn; // CHECK(_glv.getf("prn", prn)); // float *ptr_pj = _pj->mutable_gpu_data(); // float *ptr_ej = _ej->mutable_gpu_data(); // float *ptr_zj = _zj->mutable_gpu_data(); // float *ptr_bj = _bj->mutable_gpu_data()+_proj_in_pop*_dim_hcu*_dim_mcu; // float *ptr_epsc = _epsc->mutable_gpu_data()+_proj_in_pop*_dim_hcu*_dim_mcu; // const int8_t *ptr_sj = _sj->gpu_data(); // update_j_kernel_gpu<<<GSBN_GET_BLOCKS(_dim_hcu*_dim_mcu), GSBN_GET_THREADS(_dim_hcu*_dim_mcu), 0, _stream>>>( // _dim_hcu*_dim_mcu, // ptr_sj, // ptr_pj, // ptr_ej, // ptr_zj, // ptr_bj, // ptr_epsc, // _taupdt*prn, // _tauedt, // _tauzjdt, // _tauzidt, // _tauepscdt, // _kftj, // _bgain, // _eps // ); // CUDA_POST_KERNEL_CHECK; //} //__global__ void update_siq_kernel_gpu( // int n, // const int *ptr_ii, // const int *ptr_di, // const int8_t *ptr_si, // int *ptr_qi, // float *ptr_siq //){ // CUDA_KERNEL_LOOP(i, n){ // int ii=ptr_ii[i]; // if(ii>=0){ // int32_t qi = ptr_qi[i]; // qi >>= 1; // ptr_siq[i] = float(qi & 0x01); // // int8_t spk = ptr_si[ii]; // if(spk>0){ // qi |= (0x01 << ptr_di[i]); // } // ptr_qi[i]=qi; // } // } //} //void Proj::update_ss_gpu(){ // const int *ptr_ii = _ii->gpu_data(); // const int *ptr_di = _di->gpu_data(); // const int8_t *ptr_si = _si->gpu_data(); // int *ptr_qi = _qi->mutable_gpu_data(); // float *ptr_siq = _siq->mutable_gpu_data(); // // update_siq_kernel_gpu<<<GSBN_GET_BLOCKS(_dim_hcu* _dim_conn), GSBN_GET_THREADS(_dim_hcu* _dim_conn), 0, _stream>>>( // _dim_hcu * _dim_conn, // ptr_ii, // ptr_di, // ptr_si, // ptr_qi, // ptr_siq // ); // CUDA_POST_KERNEL_CHECK; // // DEVICE_VECTOR_ITERATOR(int, it); // // // get active in spike // CONST_DEVICE_VECTOR(float, *v_siq) = _siq->gpu_vector(); // DEVICE_VECTOR(int, *v_ssi) = _ssi->mutable_gpu_vector(); // v_ssi->resize(v_siq->size()); // it = copy_if( // #ifndef 
CUDA_VERSION_LEGACY // thrust::cuda::par.on(_stream), // #endif // make_counting_iterator<int>(0), // make_counting_iterator<int>(v_siq->size()), // v_siq->begin(), // v_ssi->begin(), // _1>0); // v_ssi->resize(thrust::distance(v_ssi->begin(), it)); // // // get active out spike // CONST_DEVICE_VECTOR(int8_t, *v_sj) = _sj->gpu_vector(); // DEVICE_VECTOR(int, *v_ssj) = _ssj->mutable_gpu_vector(); // v_ssj->resize(v_sj->size()); // it = copy_if( // #ifndef CUDA_VERSION_LEGACY // thrust::cuda::par.on(_stream), // #endif // make_counting_iterator<int>(0), // make_counting_iterator<int>(v_sj->size()), // v_sj->begin(), // v_ssj->begin(), // _1>0); // v_ssj->resize(thrust::distance(v_ssj->begin(), it)); //} //void Proj::update_row_gpu(){ // int active_row_num = _ssi->gpu_vector()->size(); // if(active_row_num<=0){ // return; // } // // int simstep; // float prn; // CHECK(_glv.geti("simstep", simstep)); // CHECK(_glv.getf("prn", prn)); // // float *ptr_pi = _pi->mutable_gpu_data(); // float *ptr_ei = _ei->mutable_gpu_data(); // float *ptr_zi = _zi->mutable_gpu_data(); // int *ptr_ti = _ti->mutable_gpu_data(); // const float *ptr_pj = _pj->gpu_data(); // float *ptr_pij = _pij->mutable_gpu_data(); // float *ptr_eij = _eij->mutable_gpu_data(); // float *ptr_zi2 = _zi2->mutable_gpu_data(); // float *ptr_zj2 = _zj2->mutable_gpu_data(); // int *ptr_tij = _tij->mutable_gpu_data(); // float *ptr_wij = _wij->mutable_gpu_data(); // float *ptr_epsc = _epsc->mutable_gpu_data()+ _proj_in_pop * _dim_hcu * _dim_mcu; // const float *ptr_siq = _siq->gpu_data(); // // const int *ptr_ssi = _ssi->gpu_data(); // update_row_kernel_gpu<<<active_row_num, _dim_mcu, 0, _stream>>>( // _dim_conn, // _dim_mcu, // ptr_ssi, // ptr_pi, // ptr_ei, // ptr_zi, // ptr_ti, // ptr_pj, // ptr_pij, // ptr_eij, // ptr_zi2, // ptr_zj2, // ptr_tij, // ptr_wij, // ptr_epsc, // simstep, // _taupdt*prn, // _tauedt, // _tauzidt, // _tauzjdt, // _kfti, // _wgain, // _eps, // _eps2 // ); // CUDA_POST_KERNEL_CHECK; //} //void Proj::update_col_gpu(){ // int active_col_num = _ssj->gpu_vector()->size(); // if(active_col_num<=0){ // return; // } // // int simstep; // float prn; // CHECK(_glv.geti("simstep", simstep)); // CHECK(_glv.getf("prn", prn)); // // float *ptr_pij = _pij->mutable_gpu_data(); // float *ptr_eij = _eij->mutable_gpu_data(); // float *ptr_zi2 = _zi2->mutable_gpu_data(); // float *ptr_zj2 = _zj2->mutable_gpu_data(); // int *ptr_tij = _tij->mutable_gpu_data(); // // const int *ptr_ii = _ii->gpu_data(); // const int *ptr_ssj = _ssj->gpu_data(); // // update_col_kernel_gpu<<<GSBN_GET_BLOCKS(_dim_conn*active_col_num), GSBN_GET_THREADS(_dim_conn*active_col_num), 0, _stream>>>( // _dim_conn*active_col_num, // active_col_num, // _dim_conn, // _dim_mcu, // ptr_ii, // ptr_ssj, // ptr_pij, // ptr_eij, // ptr_zi2, // ptr_zj2, // ptr_tij, // simstep, // _taupdt * prn, // _tauedt, // _tauzidt, // _tauzjdt, // _kftj // ); // CUDA_POST_KERNEL_CHECK; //} } } #endif
4aadb813312934335234d1d595cd1e2a454641d1.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <vector>
#include "paddle/fluid/operators/reduce_ops/cub_reduce.h"
#include "paddle/fluid/operators/reduce_ops/reduce_mean_op.h"

namespace paddle {
namespace operators {

template <typename T>
struct DivideFunctor {
  HOSTDEVICE explicit inline DivideFunctor(int n) : n_inv((T)(1.0 / n)) {}

  HOSTDEVICE inline T operator()(const T& x) const { return x * n_inv; }

 private:
  T n_inv;
};

template <typename T>
class ReduceMeanKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    bool reduce_all = context.Attr<bool>("reduce_all");
    auto* input = context.Input<Tensor>("X");
    auto* output = context.Output<Tensor>("Out");

    auto dims = context.Attr<std::vector<int>>("dim");
    bool keep_dim = context.Attr<bool>("keep_dim");

    std::vector<int> reduce_dims;
    if (reduce_all) {
      reduce_dims.resize(input->dims().size());
      for (int i = 0; i < reduce_dims.size(); ++i) reduce_dims[i] = i;
    } else {
      for (auto e : dims) {
        reduce_dims.push_back(e >= 0 ? e : e + input->dims().size());
      }
    }

    int reduce_num = 1;
    for (int i = 0; i < reduce_dims.size(); ++i) {
      reduce_num *= input->dims()[reduce_dims[i]];
    }

    auto stream = context.cuda_device_context().stream();
    TensorReduce<T, T, hipcub::Sum, DivideFunctor<T>>(
        *input, output, reduce_dims, static_cast<T>(0), hipcub::Sum(),
        DivideFunctor<T>(reduce_num), stream);
  }
};

}  // namespace operators
}  // namespace paddle

REGISTER_OP_CUDA_KERNEL(reduce_mean, ops::ReduceMeanKernel<bool>,
                        ops::ReduceMeanKernel<float>,
                        ops::ReduceMeanKernel<double>);
4aadb813312934335234d1d595cd1e2a454641d1.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <vector>
#include "paddle/fluid/operators/reduce_ops/cub_reduce.h"
#include "paddle/fluid/operators/reduce_ops/reduce_mean_op.h"

namespace paddle {
namespace operators {

template <typename T>
struct DivideFunctor {
  HOSTDEVICE explicit inline DivideFunctor(int n) : n_inv((T)(1.0 / n)) {}

  HOSTDEVICE inline T operator()(const T& x) const { return x * n_inv; }

 private:
  T n_inv;
};

template <typename T>
class ReduceMeanKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    bool reduce_all = context.Attr<bool>("reduce_all");
    auto* input = context.Input<Tensor>("X");
    auto* output = context.Output<Tensor>("Out");

    auto dims = context.Attr<std::vector<int>>("dim");
    bool keep_dim = context.Attr<bool>("keep_dim");

    std::vector<int> reduce_dims;
    if (reduce_all) {
      reduce_dims.resize(input->dims().size());
      for (int i = 0; i < reduce_dims.size(); ++i) reduce_dims[i] = i;
    } else {
      for (auto e : dims) {
        reduce_dims.push_back(e >= 0 ? e : e + input->dims().size());
      }
    }

    int reduce_num = 1;
    for (int i = 0; i < reduce_dims.size(); ++i) {
      reduce_num *= input->dims()[reduce_dims[i]];
    }

    auto stream = context.cuda_device_context().stream();
    TensorReduce<T, T, cub::Sum, DivideFunctor<T>>(
        *input, output, reduce_dims, static_cast<T>(0), cub::Sum(),
        DivideFunctor<T>(reduce_num), stream);
  }
};

}  // namespace operators
}  // namespace paddle

REGISTER_OP_CUDA_KERNEL(reduce_mean, ops::ReduceMeanKernel<bool>,
                        ops::ReduceMeanKernel<float>,
                        ops::ReduceMeanKernel<double>);
dd9187ef66642b1ad1ce7f62fd3fa184963fac97.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "kernel.h" #include <hip/hip_runtime.h> #include <rocblas.h> #include <cusolverDn.h> #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, hipGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ system("pause");\ exit(1); \ } \ } while (0) static GPUConstraint *constraint,*dev_constraint; static GPUPrimitive *primitive,*dev_primitive; static glm::vec3 *pos,*dev_pos; static glm::vec3 *vel,*dev_vel; static glm::vec3 *force,*dev_force; static glm::vec3 *dev_pbd; static glm::vec3 *dev_collisionNormal; static glm::vec3 *angular_momentum,*dev_angular_momentum; static glm::mat3x3 *inertia,*dev_inertia; static float *dev_dist; static int height,width,dimension,constraintNum,primitiveNum,triangleNum; static float mass; static float restitution_coefficient,damping_coefficient; static int torn,*dev_torn; static int *torn_id,*dev_torn_id; static glm::vec3 * dev_k1_x; static glm::vec3 * dev_k1_v; static glm::vec3 * dev_k2_x; static glm::vec3 * dev_k2_v; static glm::vec3 * dev_k3_x; static glm::vec3 * dev_k3_v; static glm::vec3 * dev_k4_x; static glm::vec3 * dev_k4_v; static glm::vec3 * dev_pos_temp1,*pos_temp1; static glm::vec3 * dev_vel_temp1,*vel_temp1; static glm::vec3 * dev_external_force; static float* dev_vel_implicit; static float* dev_b_implicit; static float* dev_force_implicit; static int* dev_coo_Rows; static int* dev_csr_Rows; static int* dev_Cols; static float* dev_Val; static int dev_nnz; static int springConstraintNum; static float tear_value; static bool tearable; static glm::vec3 *dev_primitive_pos; /* helper function for matrix operation */ //matrix copy matrix v1=v2 __global__ void vector_copy_vector(glm::vec3 *v1,glm::vec3 *v2,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ v1[index]=v2[index]; } } //matrix add matrix v3=v1+v2 __global__ void vector_add_vector(glm::vec3 *v1,glm::vec3 *v2,glm::vec3 *v3, int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ v3[index]=v1[index]+v2[index]; } } //matrix add matrix times a factor v3=v1+v2*mul __global__ void vector_add_mulvector(glm::vec3 *v1,glm::vec3 *v2,glm::vec3 *v3,float mul,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ v3[index]=v1[index]+mul*v2[index]; } } //matrix minus matrix v3=v1-v2 __global__ void vector_minus_vector(glm::vec3 *v1,glm::vec3 *v2,glm::vec3 *v3,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ v3[index]=v1[index]-v2[index]; } } //matrix minus matrix times a factor v3=v1-v2*mul __global__ void vector_minus_mulvector(glm::vec3 *v1,glm::vec3 *v2,glm::vec3 *v3,float mul,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ v3[index]=v1[index]-mul*v2[index]; } } //matrix times a factor v_out=v_in*mul __global__ void vector_mul_scalar(glm::vec3 * v_out, glm::vec3 * v_in, float mul ,int N) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index<N){ v_out[index] = mul * v_in[index]; } } //matrix minus a matrix and times a factor v3=(v1-v2)*mul __global__ void vector_add_vector_mul(glm::vec3 *v1,glm::vec3 *v2,glm::vec3 *v3,float mul,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ v3[index]=(v1[index]+v2[index])*mul; } } //matrix add a matrix and times a factor v3=(v1+v2)*mul __global__ void vector_minus_vector_mul(glm::vec3 *v1,glm::vec3 *v2,glm::vec3 
*v3,float mul,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ v3[index]=(v1[index]-v2[index])*mul; } } // implicit method Ax=b compute the rhs __global__ void compute_b(float mass, float dt, float* _dev_v, float* _dev_force, float* b,int N) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index<N){ b[index] = mass*_dev_v[index] + dt*_dev_force[index]; } } __global__ void convert_2_implicit_data(glm::vec3* data_in, float* data_out, int N) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index<N){ data_out[3 * index] = data_in[index].x; data_out[3 * index + 1] = data_in[index].y; data_out[3 * index + 2] = data_in[index].z; } } __global__ void inv_convert_2_implicit_data(float* data_in, glm::vec3* data_out, int N) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index<N){ data_out[index].x = data_in[3 * index]; data_out[index].y = data_in[3 * index + 1]; data_out[index].z = data_in[3 * index + 2]; } } /* helper function for matrix operation */ /* Intersection Test */ __device__ bool CubeIntersectionTest(glm::vec3 &p, glm::vec3 &normal, glm::vec3 dim,glm::vec3 center,float &dist) { float COLLISION_EPSILON=1e-1; glm::vec3 diff = p - center; float xcollide,ycollide,zcollide; xcollide=(fabs(diff.x)-dim.x-COLLISION_EPSILON); ycollide=(fabs(diff.y)-dim.y-COLLISION_EPSILON); zcollide=(fabs(diff.z)-dim.z-COLLISION_EPSILON); if(xcollide<0&&ycollide<0&&zcollide<0){ if(xcollide>=ycollide&&xcollide>=zcollide){ if(diff.x>0){ normal=glm::vec3(1,0,0); } else{ normal=glm::vec3(-1,0,0); } dist=xcollide; return true; } else if(ycollide>=xcollide&&ycollide>=zcollide){ if(diff.y>0){ normal=glm::vec3(0,1,0); } else{ normal=glm::vec3(0,-1,0); } dist=ycollide; return true; } else if(zcollide>=ycollide&&zcollide>=xcollide){ if(diff.z>0){ normal=glm::vec3(0,0,1); } else{ normal=glm::vec3(0,0,-1); } dist=zcollide; return true; } } else { dist=1; return false; } } __device__ bool SphereIntersectionTest(glm::vec3 &p, glm::vec3 &normal,float radius, glm::vec3 center,float &dist) { float COLLISION_EPSILON=1e-1; glm::vec3 diff = p - center; dist = glm::length(diff) - radius - COLLISION_EPSILON; if (dist < 0) { normal = glm::normalize(diff); return true; } else { return false; } } __device__ bool PlaneIntersectionTest(glm::vec3 &p, glm::vec3 &normal, glm::vec3 pNormal, glm::vec3 center,float &dist) { float COLLISION_EPSILON=1e-1; float height = center[1]; dist = p.y - height - COLLISION_EPSILON; normal=pNormal; if (dist < 0) { return true; } else { return false; } } __device__ bool insideBoxOnGPU(glm::vec3 pos,kdtree *tree){ if(pos.x<=tree->xMax&&pos.x>=tree->xMin&&pos.y<=tree->yMax&&pos.y>=tree->yMin&& pos.z<=tree->zMax&&pos.z>=tree->zMin){ return true; } else return false; } __device__ void getNearbyTrianglesOnGPU(glm::vec3 pos,kdtree *tree, int *list){ int count=0,num=0,n=0; kdtree *kd[1000]; kd[count++]=tree; while(count<1000&&n!=count&&num<180){ kdtree *current=kd[n]; if(insideBoxOnGPU(pos,current)){ if(current->lc==nullptr&&current->rc==nullptr) list[num++]=current->index; else{ kd[count++]=current->lc; if(count>=1000) break; kd[count++]=current->rc; } } n++; } } __device__ glm::vec3 getNormalOnGPU(glm::vec3 *m_positions,glm::vec3 *m_normals,int *m_indices, unsigned short TriangleIndex){ glm::vec3 n1,n2,n3,v1,v2,v3,n,crossN,v12,v13; unsigned int index1,index2,index3; index1=m_indices[3*TriangleIndex]; index2=m_indices[3*TriangleIndex+1]; index3=m_indices[3*TriangleIndex+2]; v1=m_positions[index1];v2=m_positions[index2];v3=m_positions[index3]; 
n1=m_normals[index1];n2=m_normals[index2];n3=m_normals[index3]; v12=v1-v2;v13=v1-v3; v12=glm::normalize(v12);v13=glm::normalize(v13); crossN=glm::cross(v12,v13); crossN=glm::normalize(crossN); n=(n1+n2+n3); n=glm::normalize(n); if(glm::dot(n,crossN)<0) return -crossN; else return crossN; } __device__ float getDistanceOnGPU(glm::vec3 *m_positions,glm::vec3 *m_normals,int *m_indices,glm::vec3 p,unsigned short TriangleIndex){ float dis,k,x; unsigned short index; index=m_indices[3*TriangleIndex]; glm::vec3 normal=getNormalOnGPU(m_positions,m_normals,m_indices,TriangleIndex); glm::vec3 d=p-m_positions[index]; x=-(normal.x*d.x+normal.y*d.y+normal.z*d.z); //k=normal.x*normal.x+normal.y*normal.y+normal.z*normal.z; //dis=x/k; return x; } __device__ bool ObjectIntersectionTest(glm::vec3 & p, glm::vec3 & normal, kdtree *tree,glm::vec3 center,float &dist, glm::vec3 *obj_vertex,glm::vec3 *obj_normal,int *obj_indices) { // TODO float minDis=1e7; float COLLISION_EPSILON=1e-3; bool inCollision=false; glm::vec3 pos=p;//-center; int list[180]; for(int i=0;i<180;++i) list[i]=-1; getNearbyTrianglesOnGPU(pos,tree,list); pos-=center; for(int i=0;i<180;i++){ if(list[i]==-1) break; float tmp=getDistanceOnGPU(obj_vertex,obj_normal,obj_indices,pos,list[i]); if(tmp>0&&tmp<minDis&&tmp<0.1){ glm::vec3 n=getNormalOnGPU(obj_vertex,obj_normal,obj_indices,list[i]); normal=n; minDis=tmp; inCollision=true; } } dist=-minDis-COLLISION_EPSILON; return inCollision; } /* Intetsection Test */ __global__ void collisionDetectionKernel(glm::vec3 *pos,glm::vec3 *nor,float *dist,GPUPrimitive *primitive,int primitiveNum,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ dist[index]=0; nor[index]=glm::vec3(0); for(int i=0;i<primitiveNum;++i){ float d=0; glm::vec3 n(0); if(primitive[i].type==0){ if(CubeIntersectionTest(pos[index],n,primitive[i].cSize,primitive[i].pos,d)){ if (d < dist[index]){ dist[index] = d; nor[index]= n; } }//if }//if else if(primitive[i].type==1){ if(SphereIntersectionTest(pos[index],n,primitive[i].radius,primitive[i].pos,d)){ if (d < dist[index]){ dist[index] = d; nor[index]= n; } }//if } else if(primitive[i].type==2){ if(PlaneIntersectionTest(pos[index],n,primitive[i].pNormal,primitive[i].pos,d)){ if (d < dist[index]){ dist[index] = d; nor[index]= n; } }//if } else if(primitive[i].type==3){ if(ObjectIntersectionTest(pos[index],n,primitive[i].tree,primitive[i].pos,d,primitive[i].objVertex,primitive[i].objNormal,primitive[i].objIndices)){ if (d < dist[index]){ dist[index] = d; nor[index]= n; } } } } } } __global__ void collisionSolvingOnGPU(glm::vec3 *pos,glm::vec3 *vel,float *dist,glm::vec3 *nor,float restitution_coefficient,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N&&glm::length(nor[index])>0.1){ pos[index]-=nor[index]*dist[index]; float n=glm::dot(nor[index],vel[index]); vel[index]+=-(1+restitution_coefficient)*n*nor[index]; } } __global__ void addGravityOnGPU(glm::vec3 *force,float mass,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ force[index].y=-9.80f*mass; force[index].x=0; force[index].z=0; } } __global__ void PBDProjectKernel(GPUConstraint *constraint,glm::vec3 *p,int *torn,int *torn_id,float tear_value,bool tearable,int N,int ns){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ if(constraint[index].type==0&&constraint[index].active){//Attachment Constraint float k_prime=1.0-pow(1.0-constraint[index].stiffnessPBD,1.0/ns); glm::vec3 v=p[constraint[index].fix_index]; glm::vec3 dp=constraint[index].fixedPoint-v; 
atomicAdd(&p[constraint[index].fix_index].x,k_prime*dp.x); atomicAdd(&p[constraint[index].fix_index].y,k_prime*dp.y); atomicAdd(&p[constraint[index].fix_index].z,k_prime*dp.z); //p[constraint[index].fix_index]+=k_prime*dp; } else if(constraint[index].type==1&&constraint[index].active){//Spring Constraint float k_prime=1.0-pow(1.0-constraint[index].stiffnessPBD,1.0/ns); float rest_length=constraint[index].rest_length; glm::vec3 v1=p[constraint[index].p1]; glm::vec3 v2=p[constraint[index].p2]; float current_length=glm::length(v1-v2); glm::vec3 current_direction=(v1-v2)/current_length; glm::vec3 dp=(current_length-rest_length)*current_direction; atomicAdd(&p[constraint[index].p1].x,-0.5f*k_prime*dp.x); atomicAdd(&p[constraint[index].p1].y,-0.5f*k_prime*dp.y); atomicAdd(&p[constraint[index].p1].z,-0.5f*k_prime*dp.z); atomicAdd(&p[constraint[index].p2].x,0.5f*k_prime*dp.x); atomicAdd(&p[constraint[index].p2].y,0.5f*k_prime*dp.y); atomicAdd(&p[constraint[index].p2].z,0.5f*k_prime*dp.z); if(tearable&&current_length>tear_value*rest_length){ torn[0]=1; constraint[index].active=false; if(constraint[index].triangleId1!=-1) torn_id[constraint[index].triangleId1]=1; if(constraint[index].triangleId2!=-1) torn_id[constraint[index].triangleId2]=1; } //p[constraint[index].p1]-=0.5f*k_prime*dp; //p[constraint[index].p2]+=0.5f*k_prime*dp; } } } __global__ void kern_compute_force(glm::vec3* dev_force, glm::vec3* dev_pos, GPUConstraint *dev_constraint, int Num_Constraint) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index < Num_Constraint) { if (dev_constraint[index].type == 0&&dev_constraint[index].active) //attachment constraint { glm::vec3 p0 = dev_constraint[index].fixedPoint; glm::vec3 p1 = dev_pos[dev_constraint[index].fix_index]; float cur_len = glm::length(p1 - p0); float stiffness = dev_constraint[index].stiffness; glm::vec3 cur_force = stiffness*(p0 - p1); //// atomic add atomicAdd(&(dev_force[dev_constraint[index].fix_index].x), cur_force.x); atomicAdd(&(dev_force[dev_constraint[index].fix_index].y), cur_force.y); atomicAdd(&(dev_force[dev_constraint[index].fix_index].z), cur_force.z); //dev_force[dev_constraint[index].fix_index] += cur_force; } else if (dev_constraint[index].type == 1) //spring constraint { glm::vec3 p1 = dev_pos[dev_constraint[index].p1]; glm::vec3 p2 = dev_pos[dev_constraint[index].p2]; float cur_len = glm::length(p1 - p2); float stiffness = dev_constraint[index].stiffness; glm::vec3 cur_force = stiffness*(cur_len - dev_constraint[index].rest_length) / cur_len*(p2 - p1); //// atomic add atomicAdd(&(dev_force[dev_constraint[index].p1].x), cur_force.x); atomicAdd(&(dev_force[dev_constraint[index].p1].y), cur_force.y); atomicAdd(&(dev_force[dev_constraint[index].p1].z), cur_force.z); atomicAdd(&(dev_force[dev_constraint[index].p2].x), -cur_force.x); atomicAdd(&(dev_force[dev_constraint[index].p2].y), -cur_force.y); atomicAdd(&(dev_force[dev_constraint[index].p2].z), -cur_force.z); //dev_force[dev_constraint[index].p1] += cur_force; //dev_force[dev_constraint[index].p2] -= cur_force; } } } __global__ void kern_RK4_computation(glm::vec3 *dev_out, glm::vec3 *dev_k1, glm::vec3 *dev_k2, glm::vec3 *dev_k3, glm::vec3 *dev_k4, float a, int N) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index < N) { dev_out[index] = dev_out[index] + a * (dev_k1[index] + 2.f* dev_k2[index] + 2.f * dev_k3[index] + dev_k4[index]); } } void initData(){ hipMalloc(&dev_pos,dimension*sizeof(glm::vec3)); hipMalloc(&dev_vel,dimension*sizeof(glm::vec3)); hipMalloc(&dev_pos_temp1, 
dimension*sizeof(glm::vec3)); hipMalloc(&dev_vel_temp1, dimension*sizeof(glm::vec3)); hipMalloc(&dev_force,dimension*sizeof(glm::vec3)); hipMalloc(&dev_external_force, dimension*sizeof(glm::vec3)); hipMalloc(&dev_pbd,dimension*sizeof(glm::vec3)); hipMalloc(&dev_constraint,(constraintNum+100)*sizeof(GPUConstraint));//give 100 more space for additional attachment constraint hipMalloc(&dev_primitive,primitiveNum*sizeof(GPUPrimitive)); hipMalloc(&dev_collisionNormal,dimension*sizeof(glm::vec3)); hipMalloc(&dev_dist,dimension*sizeof(float)); hipMalloc(&dev_angular_momentum,dimension*sizeof(float)); hipMalloc(&dev_inertia,dimension*sizeof(glm::mat3x3)); hipMalloc(&dev_k1_x, dimension*sizeof(glm::vec3)); hipMalloc(&dev_k1_v, dimension*sizeof(glm::vec3)); hipMalloc(&dev_k2_x, dimension*sizeof(glm::vec3)); hipMalloc(&dev_k2_v, dimension*sizeof(glm::vec3)); hipMalloc(&dev_k3_x, dimension*sizeof(glm::vec3)); hipMalloc(&dev_k3_v, dimension*sizeof(glm::vec3)); hipMalloc(&dev_k4_x, dimension*sizeof(glm::vec3)); hipMalloc(&dev_k4_v, dimension*sizeof(glm::vec3)); hipMalloc(&dev_vel_implicit, 3 * dimension*sizeof(float)); hipMalloc(&dev_force_implicit, 3 * dimension*sizeof(float)); hipMalloc(&dev_b_implicit, 3 * dimension*sizeof(float)); hipMalloc(&dev_torn_id,triangleNum*sizeof(int)); hipMalloc(&dev_torn,sizeof(int)); hipMalloc(&dev_primitive_pos,primitiveNum*sizeof(glm::vec3)); hipMemcpy(dev_pos,pos,dimension*sizeof(glm::vec3),hipMemcpyHostToDevice); hipMemcpy(dev_vel,vel,dimension*sizeof(glm::vec3),hipMemcpyHostToDevice); hipMemcpy(dev_constraint,constraint,constraintNum*sizeof(GPUConstraint),hipMemcpyHostToDevice); hipMemcpy(dev_primitive,primitive,primitiveNum*sizeof(GPUPrimitive),hipMemcpyHostToDevice); hipMemset(dev_force,0,dimension*sizeof(glm::vec3)); hipMemset(dev_external_force,0,dimension*sizeof(glm::vec3)); hipMemset(dev_torn_id,0,triangleNum*sizeof(int)); hipMemset(dev_torn,0,sizeof(int)); } void deleteData(){ hipFree(dev_constraint); hipFree(dev_primitive); hipFree(dev_pos); hipFree(dev_vel); hipFree(dev_force); hipFree(dev_external_force); hipFree(dev_pbd); hipFree(dev_collisionNormal); hipFree(dev_dist); hipFree(dev_angular_momentum); hipFree(dev_inertia); hipFree(dev_k1_x); hipFree(dev_k1_v); hipFree(dev_k2_x); hipFree(dev_k2_v); hipFree(dev_k3_x); hipFree(dev_k3_v); hipFree(dev_k4_x); hipFree(dev_k4_v); hipFree(dev_pos_temp1); hipFree(dev_vel_temp1); hipFree(dev_torn_id); hipFree(dev_torn); hipFree(dev_primitive_pos); delete(force); delete(pos); delete(vel); delete(pos_temp1); delete(vel_temp1); delete(angular_momentum); delete(inertia); delete(torn_id); } void copyData(GPUConstraint *GConstraint,GPUPrimitive *GPrimitive,glm::vec3 *Gpos,glm::vec3 *Gvel,int Gheight,int Gwidth ,int GconstraintNum,int GspringConstraintNum,int GprimitiveNum,int GtriangleNum,float Gmass, float Grestitution_coefficient,float Gdamping_coefficient,float Gtear_value,bool Gtearable){ constraint=GConstraint; primitive=GPrimitive; height=Gheight; width=Gwidth; dimension=height*width; constraintNum=GconstraintNum; springConstraintNum=GspringConstraintNum; primitiveNum=GprimitiveNum; triangleNum=GtriangleNum; mass=Gmass; restitution_coefficient=Grestitution_coefficient; damping_coefficient=Gdamping_coefficient; tear_value=Gtear_value; tearable=Gtearable; force=new glm::vec3[height*width]; pos=new glm::vec3[height*width]; vel=new glm::vec3[height*width]; pos_temp1=new glm::vec3[height*width]; vel_temp1=new glm::vec3[height*width]; angular_momentum=new glm::vec3[height*width]; inertia=new glm::mat3x3[height*width]; 
torn_id=new int[triangleNum]; for(int i=0;i<height*width;++i){ force[i]=glm::vec3(0); pos[i]=Gpos[i]; vel[i]=Gvel[i]; } initData(); } void calculateExternalForceoOnGPU() { hipMemset(dev_external_force,0,dimension*sizeof(glm::vec3)); addGravityOnGPU << <(dimension + 255) / 256, 256 >> >(dev_external_force, mass, dimension); } void detectCollisionOnGPU(){ hipMemset(dev_collisionNormal,0,dimension*sizeof(glm::vec3));//reset every time,may need stream compaction to improve hipMemset(dev_dist,0,dimension*sizeof(float)); //cout<<primitive[1].tree->index<<endl; //if(primitive[1].tree==nullptr) cout<<"null"<<endl; hipLaunchKernelGGL(( collisionDetectionKernel), dim3((dimension+255)/256),dim3(256), 0, 0, dev_pos,dev_collisionNormal,dev_dist,dev_primitive,primitiveNum,dimension); } void resolveCollisionOnGPU(){ hipLaunchKernelGGL(( collisionSolvingOnGPU), dim3((dimension+255)/256),dim3(256), 0, 0, dev_pos,dev_vel,dev_dist,dev_collisionNormal,restitution_coefficient,dimension); hipMemcpy(pos,dev_pos,dimension*sizeof(glm::vec3),hipMemcpyDeviceToHost); } __global__ void calculateAngularMomentum(glm::vec3 *pos,glm::vec3 *vel,glm::vec3 tmpPos,glm::vec3 *angular_momentum, glm::mat3x3 *inertia,float mass,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ glm::vec3 r=pos[index]-tmpPos; angular_momentum[index]+=mass*glm::cross(r,vel[index]); glm::mat3x3 r_mat(1); r_mat[0][1]=r.z; r_mat[0][2]=-r.y; r_mat[1][0]=-r.z; r_mat[1][2]=r.x; r_mat[2][0]=r.y; r_mat[2][1]=-r.x; inertia[index]=r_mat*glm::transpose(r_mat)*mass; } } __global__ void calculateVelocityDamping(glm::vec3 *pos,glm::vec3 *vel,glm::vec3 tmpPos,glm::vec3 tmpVel, glm::vec3 angular_momentum,glm::mat3x3 inertia,float damping_coefficient,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ glm::vec3 r=pos[index]-tmpPos; glm::vec3 angular_vel=glm::inverse(inertia)*angular_momentum; glm::vec3 delta_v=tmpVel+glm::cross(angular_vel,r)-vel[index]; vel[index]+=damping_coefficient*delta_v; } } void dampVelocityOnGPU() { if (std::abs(damping_coefficient) < 1e-15) return; hipMemcpy(pos_temp1,dev_pos,dimension*sizeof(glm::vec3),hipMemcpyDeviceToHost); hipMemcpy(vel_temp1,dev_vel,dimension*sizeof(glm::vec3),hipMemcpyDeviceToHost); thrust::inclusive_scan(pos_temp1,pos_temp1+dimension,pos_temp1); thrust::inclusive_scan(vel_temp1,vel_temp1+dimension,vel_temp1); hipLaunchKernelGGL(( calculateAngularMomentum), dim3((dimension+255)/256),dim3(256), 0, 0, dev_pos,dev_vel,pos_temp1[dimension-1]/(1.0f*dimension),dev_angular_momentum, dev_inertia,mass,dimension); hipMemcpy(angular_momentum,dev_angular_momentum,dimension*sizeof(glm::vec3),hipMemcpyDeviceToHost); hipMemcpy(inertia,dev_inertia,dimension*sizeof(glm::mat3x3),hipMemcpyDeviceToHost); thrust::inclusive_scan(angular_momentum,angular_momentum+dimension,angular_momentum); thrust::inclusive_scan(inertia,inertia+dimension,inertia); hipLaunchKernelGGL(( calculateVelocityDamping), dim3((dimension+255)/256),dim3(256), 0, 0, dev_pos,dev_vel,pos_temp1[dimension-1]/(1.0f*dimension),vel_temp1[dimension-1]/(1.0f*dimension), angular_momentum[dimension-1],inertia[dimension-1],damping_coefficient,dimension); hipMemcpy(pos,dev_pos,dimension*sizeof(glm::vec3),hipMemcpyDeviceToHost); } void integratePBDOnGPU(int ns,float dt) { for(int i=0;i<ns;++i){ hipLaunchKernelGGL(( vector_add_mulvector), dim3((dimension+255)/256),dim3(256), 0, 0, dev_vel,dev_external_force,dev_vel,dt*1.0/mass,dimension); hipLaunchKernelGGL(( vector_add_mulvector), dim3((dimension+255)/256),dim3(256), 0, 0, 
dev_pos,dev_vel,dev_pbd,dt,dimension); hipLaunchKernelGGL(( PBDProjectKernel), dim3((constraintNum+255)/256),dim3(256), 0, 0, dev_constraint,dev_pbd,dev_torn,dev_torn_id,tear_value,tearable,constraintNum,ns); hipLaunchKernelGGL(( vector_minus_vector_mul), dim3((dimension+255)/256),dim3(256), 0, 0, dev_pbd,dev_pos,dev_vel,1.0/(dt),dimension); hipLaunchKernelGGL(( vector_copy_vector), dim3((dimension+255)/256),dim3(256), 0, 0, dev_pos,dev_pbd,dimension); detectCollisionOnGPU(); resolveCollisionOnGPU(); } } //==================== integration ==================== void integrateExplicitEuler_GPU(float dt) { hipMemset(dev_force, 0, dimension*sizeof(glm::vec3)); //compute force kern_compute_force << <(constraintNum + 255) / 256, 256 >> >(dev_force, dev_pos, dev_constraint, constraintNum); hipDeviceSynchronize(); cudaCheckErrors("kernel fail"); //add external force vector_add_vector << <(dimension + 255) / 256, 256 >> > (dev_force,dev_external_force,dev_force,dimension); //pos vector_add_mulvector << <(dimension + 255) / 256, 256 >> > (dev_pos, dev_vel, dev_pos,dt,dimension ); //vel float dt_inv_mass = dt / mass; vector_add_mulvector << <(dimension + 255) / 256, 256 >> > (dev_vel, dev_force, dev_vel, dt_inv_mass, dimension); //clear the force mem //hipMemset(dev_force, 0, dimension); hipMemcpy(pos, dev_pos, dimension*sizeof(glm::vec3), hipMemcpyDeviceToHost); } void integrateExplicitRK2_GPU(float dt) { hipMemset(dev_force, 0, dimension*sizeof(glm::vec3)); //compute force kern_compute_force << <(constraintNum + 255) / 256, 256 >> >(dev_force, dev_pos, dev_constraint, constraintNum); hipDeviceSynchronize(); cudaCheckErrors("kernel fail"); //add external force vector_add_vector << <(dimension + 255) / 256, 256 >> > (dev_force, dev_external_force, dev_force,dimension); //pos vector_add_mulvector << <(dimension + 255) / 256, 256 >> > (dev_pos, dev_vel, dev_pos_temp1, dt, dimension); //vel float dt_inv_mass = dt / mass; vector_add_mulvector << <(dimension + 255) / 256, 256 >> > (dev_vel, dev_force, dev_vel_temp1, dt_inv_mass, dimension); hipMemset(dev_force,0,dimension*sizeof(glm::vec3)); kern_compute_force << <(constraintNum + 255) / 256, 256 >> >(dev_force, dev_pos_temp1, dev_constraint, constraintNum); //add external force vector_add_vector << <(dimension + 255) / 256, 256 >> > (dev_force, dev_external_force, dev_force,dimension); //pos vector_add_mulvector << <(dimension + 255) / 256, 256 >> > (dev_pos, dev_vel_temp1, dev_pos, dt, dimension); //vel vector_add_mulvector << <(dimension + 255) / 256, 256 >> > (dev_vel, dev_force, dev_vel, dt_inv_mass, dimension); //clear the force mem //hipMemset(dev_force, 0, dimension); hipMemcpy(pos, dev_pos, dimension*sizeof(glm::vec3), hipMemcpyDeviceToHost); } void integrateExplicitRK4_GPU(float dt) { float half_dt = dt / 2; float inv_mass = 1.f / mass; //step1 hipMemset(dev_force, 0, dimension*sizeof(glm::vec3)); //compute force kern_compute_force << <(constraintNum + 255) / 256, 256 >> >(dev_force, dev_pos, dev_constraint, constraintNum); hipDeviceSynchronize(); //add external force vector_add_vector << <(dimension + 255) / 256, 256 >> > (dev_force, dev_external_force, dev_force,dimension); hipMemcpy(dev_k1_x, dev_vel, dimension*sizeof(glm::vec3), hipMemcpyDeviceToDevice); vector_mul_scalar << <(constraintNum + 255) / 256, 256 >> >(dev_k1_v, dev_force, inv_mass, dimension); vector_add_mulvector << <(constraintNum + 255) / 256, 256 >> >(dev_pos, dev_k1_x, dev_pos_temp1, half_dt, dimension); vector_add_mulvector << <(constraintNum + 255) / 256, 256 >> >(dev_vel, 
dev_k1_v, dev_vel_temp1, half_dt, dimension); //step 2 hipMemset(dev_force, 0, dimension*sizeof(glm::vec3)); //compute force kern_compute_force << <(constraintNum + 255) / 256, 256 >> >(dev_force, dev_pos_temp1, dev_constraint, constraintNum); hipDeviceSynchronize(); //add external force vector_add_vector << <(dimension + 255) / 256, 256 >> > (dev_force, dev_external_force, dev_force,dimension); hipMemcpy(dev_k2_x, dev_vel_temp1, dimension*sizeof(glm::vec3), hipMemcpyDeviceToDevice); vector_mul_scalar << <(constraintNum + 255) / 256, 256 >> >(dev_k2_v, dev_force, inv_mass, dimension); vector_add_mulvector << <(constraintNum + 255) / 256, 256 >> >(dev_pos, dev_k2_x, dev_pos_temp1, half_dt, dimension); vector_add_mulvector << <(constraintNum + 255) / 256, 256 >> >(dev_vel, dev_k2_v, dev_vel_temp1, half_dt, dimension); //step3 hipMemset(dev_force, 0, dimension*sizeof(glm::vec3)); //compute force kern_compute_force << <(constraintNum + 255) / 256, 256 >> >(dev_force, dev_pos_temp1, dev_constraint, constraintNum); hipDeviceSynchronize(); //add external force vector_add_vector << <(dimension + 255) / 256, 256 >> > (dev_force, dev_external_force, dev_force,dimension); hipMemcpy(dev_k3_x, dev_vel_temp1, dimension*sizeof(glm::vec3), hipMemcpyDeviceToDevice); vector_mul_scalar << <(constraintNum + 255) / 256, 256 >> >(dev_k3_v, dev_force, inv_mass, dimension); vector_add_mulvector << <(constraintNum + 255) / 256, 256 >> >(dev_pos, dev_k2_x, dev_pos_temp1, dt, dimension); vector_add_mulvector << <(constraintNum + 255) / 256, 256 >> >(dev_vel, dev_k2_v, dev_vel_temp1, dt, dimension); //step4 hipMemset(dev_force, 0, dimension*sizeof(glm::vec3)); //compute force kern_compute_force << <(constraintNum + 255) / 256, 256 >> >(dev_force, dev_pos_temp1, dev_constraint, constraintNum); hipDeviceSynchronize(); //add external force vector_add_vector << <(dimension + 255) / 256, 256 >> > (dev_force, dev_external_force, dev_force,dimension); hipMemcpy(dev_k4_x, dev_vel_temp1, dimension*sizeof(glm::vec3), hipMemcpyDeviceToDevice); vector_mul_scalar << <(constraintNum + 255) / 256, 256 >> >(dev_k4_v, dev_force, inv_mass, dimension); //all together float a = dt / 6.f; kern_RK4_computation << <(constraintNum + 255) / 256, 256 >> >(dev_pos, dev_k1_x, dev_k2_x, dev_k3_x, dev_k4_x, a, dimension); kern_RK4_computation << <(constraintNum + 255) / 256, 256 >> >(dev_vel, dev_k1_v, dev_k2_v, dev_k3_v, dev_k4_v, a, dimension); hipMemcpy(pos, dev_pos, dimension*sizeof(glm::vec3), hipMemcpyDeviceToHost); } void integrateImplicitBW_GPU(float dt) { hipMemset(dev_force, 0, dimension*sizeof(glm::vec3)); //compute force kern_compute_force << <(constraintNum + 255) / 256, 256 >> >(dev_force, dev_pos, dev_constraint, constraintNum); hipDeviceSynchronize(); //add external force vector_add_vector << <(dimension + 255) / 256, 256 >> > (dev_force, dev_external_force, dev_force, dimension); //convert to implicit data convert_2_implicit_data << <(dimension + 255) / 256, 256 >> > (dev_vel,dev_vel_implicit,dimension); convert_2_implicit_data << <(dimension + 255) / 256, 256 >> > (dev_force, dev_force_implicit, dimension); compute_b << <(3 * dimension + 255) / 256, 256 >> > (mass, dt, dev_vel_implicit, dev_force_implicit, dev_b_implicit, 3 * dimension); // --- create library handles: cusolverSpHandle_t cusolver_handle; cusolverStatus_t cusolver_status; cusolver_status = cusolverSpCreate(&cusolver_handle); //std::cout << "status create cusolver handle: " << cusolver_status << std::endl; hipsparseHandle_t cusparse_handle; hipsparseStatus_t 
cusparse_status; cusparse_status = hipsparseCreate(&cusparse_handle); //std::cout << "status create cusparse handle: " << cusparse_status << std::endl; hipsparseMatDescr_t descrA; cusparse_status = hipsparseCreateMatDescr(&descrA); //std::cout << "status cusparse createMatDescr: " << cusparse_status << std::endl; //solving float tol = 1e-3; int reorder = 0; int singularity = 0; //std::cout << dev_nnz << std::endl; cusolver_status = cusolverSpScsrlsvchol(cusolver_handle, 3 * dimension, dev_nnz, descrA, dev_Val, dev_csr_Rows, dev_Cols, dev_b_implicit, tol, reorder, dev_vel_implicit, &singularity); hipDeviceSynchronize(); //std::cout << "singularity (should be -1): " << singularity << std::endl; //std::cout << "status cusolver solving (!): " << cusolver_status << std::endl; // relocated these 2 lines from above to solve (2): cusparse_status = hipsparseDestroy(cusparse_handle); //std::cout << "status destroy cusparse handle: " << cusparse_status << std::endl; cusolver_status = cusolverSpDestroy(cusolver_handle); //std::cout << "status destroy cusolver handle: " << cusolver_status << std::endl; //convert the data back inv_convert_2_implicit_data << <(dimension + 255) / 256, 256 >> > (dev_vel_implicit, dev_vel, dimension); vector_add_mulvector << <(dimension + 255) / 256, 256 >> >(dev_pos, dev_vel, dev_pos, dt, dimension); } //==================== integration ==================== kdtree *initTree(kdtree *root){ //postorder method to first get the left and right child on GPU Memory, then replace it with the memory on CPU, then copy the whole point to GPU if(root==nullptr) return nullptr; kdtree *dev_lc=initTree(root->lc); kdtree *dev_rc=initTree(root->rc); kdtree *tmp=new kdtree(root); tmp->lc=dev_lc; tmp->rc=dev_rc; kdtree *dev_root; hipMalloc(&dev_root,sizeof(kdtree)); hipMemcpy(dev_root,tmp,sizeof(kdtree),hipMemcpyHostToDevice); return dev_root; } void updateAttachmentConstraintOnGPU(GPUConstraint *Gconstraint,int n){ hipMemset(dev_constraint+springConstraintNum,0,100*sizeof(GPUConstraint)); n=min(100,n);//no more than 100 Attachment Constraint hipMemcpy(dev_constraint+springConstraintNum,Gconstraint,n*sizeof(GPUConstraint),hipMemcpyHostToDevice); constraintNum=springConstraintNum+n; } void convertSystemMatrix(std::vector<int> &host_Rows, std::vector<int> &host_Cols, std::vector<float> &host_Val) { //step1 convert to coo format int nnz = host_Val.size(); dev_nnz = nnz; hipMalloc((void**)&dev_Val, nnz*sizeof(float)); hipMalloc((void**)&dev_coo_Rows, nnz*sizeof(int)); hipMalloc((void**)&dev_csr_Rows, (dimension*3 + 1)*sizeof(int)); hipMalloc((void**)&dev_Cols, nnz*sizeof(int)); hipMemcpy(dev_Val, host_Val.data(), nnz*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_coo_Rows, host_Rows.data(), nnz*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_Cols, host_Cols.data(), nnz*sizeof(int), hipMemcpyHostToDevice); //std::vector<float> hst_rows(nnz, 0); //hipMemcpy(hst_rows.data(), dev_Val, nnz*sizeof(float), hipMemcpyDeviceToHost); //for (int i = 0; i < 10; i++) //{ // std::cout << hst_rows[i] << std::endl; //} hipsparseHandle_t cusparse_handle; hipsparseStatus_t cusparse_status; cusparse_status = hipsparseCreate(&cusparse_handle); std::cout << "status create cusparse handle: " << cusparse_status << std::endl; cusparse_status = hipsparseXcoo2csr(cusparse_handle, dev_coo_Rows, nnz, dimension*3, dev_csr_Rows, HIPSPARSE_INDEX_BASE_ZERO); std::cout << "status cusparse coo2csr conversion: " << cusparse_status << std::endl; hipDeviceSynchronize(); // matrix format conversion has to be finished! 
//check the matrix //hipsparseMatDescr_t descrA; //cusparse_status = hipsparseCreateMatDescr(&descrA); //std::cout << "status cusparse createMatDescr: " << cusparse_status << std::endl; // //std::vector<float> A(dimension * 3 * dimension * 3, 0); //float *dA; //hipMalloc((void**)&dA, A.size()*sizeof(float)); //hipsparseScsr2dense(cusparse_handle, dimension * 3, dimension * 3, descrA, dev_Val, // dev_csr_Rows, dev_Cols, dA, dimension * 3); //hipMemcpy(A.data(), dA, A.size()*sizeof(float), hipMemcpyDeviceToHost); //std::cout << "A: \n"; //for (int i = 0; i < 10; ++i) { // for (int j = 0; j < 10; ++j) { // std::cout << A[i*dimension * 3 + j] << " "; // } // std::cout << std::endl; //} //hipFree(dA); cusparse_status = hipsparseDestroy(cusparse_handle); std::cout << "status destroy cusparse handle: " << cusparse_status << std::endl; } glm::vec3 *getPos(){ return pos; } glm::vec3 *getVel(){ return vel; } bool isTorn(){ hipMemcpy(&torn,dev_torn,sizeof(int),hipMemcpyDeviceToHost); if(torn==1){ hipMemset(dev_torn,0,sizeof(int)); return true; } return false; } int *getTornId(){ hipMemcpy(torn_id,dev_torn_id,triangleNum*sizeof(int),hipMemcpyDeviceToHost); return torn_id; } void resetTornFlag(){ torn=0; hipMemset(dev_torn,0,sizeof(int)); } __global__ void updatePrimitivePostionOnGPU(GPUPrimitive *primitive,glm::vec3 *newPos,int N){ int index=blockIdx.x*blockDim.x+threadIdx.x; if(index<N){ primitive[index].pos=newPos[index]; } } void updatePrimitivePosition(glm::vec3 *newPos){ hipMemcpy(dev_primitive_pos,newPos,primitiveNum*sizeof(glm::vec3),hipMemcpyHostToDevice); hipLaunchKernelGGL(( updatePrimitivePostionOnGPU), dim3((primitiveNum+255)/256),dim3(256), 0, 0, dev_primitive,dev_primitive_pos,primitiveNum); } /* test cuda core function */ __global__ void test(int *a,int *b,int *c,int N){ int index=blockIdx.x*blockDim.x+threadIdx.x; if(index<N){ c[index]=a[index]+b[index]; } } /* test function for cuda setup */ void testCuda(){ int *a,*b,*c; int *dev_a,*dev_b,*dev_c; a=new int[10]; b=new int[10]; c=new int[10]; for(int i=0;i<10;++i){ a[i]=i; b[i]=10-i; } hipMalloc(&dev_a,10*sizeof(int)); hipMalloc(&dev_b,10*sizeof(int)); hipMalloc(&dev_c,10*sizeof(int)); hipMemcpy(dev_a,a,10*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dev_b,b,10*sizeof(int),hipMemcpyHostToDevice); hipLaunchKernelGGL(( test), dim3(1),dim3(256), 0, 0, dev_a,dev_b,dev_c,10); hipMemcpy(c,dev_c,10*sizeof(int),hipMemcpyDeviceToHost); for(int i=0;i<10;++i){ std::cout<<a[i]<<","<<b[i]<<","<<c[i]<<std::endl; } }
dd9187ef66642b1ad1ce7f62fd3fa184963fac97.cu
#include "cuda.h" #include <iostream> #include "kernel.h" #include <cuda_runtime.h> #include <cublas_v2.h> #include <cusolverDn.h> #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ system("pause");\ exit(1); \ } \ } while (0) static GPUConstraint *constraint,*dev_constraint; static GPUPrimitive *primitive,*dev_primitive; static glm::vec3 *pos,*dev_pos; static glm::vec3 *vel,*dev_vel; static glm::vec3 *force,*dev_force; static glm::vec3 *dev_pbd; static glm::vec3 *dev_collisionNormal; static glm::vec3 *angular_momentum,*dev_angular_momentum; static glm::mat3x3 *inertia,*dev_inertia; static float *dev_dist; static int height,width,dimension,constraintNum,primitiveNum,triangleNum; static float mass; static float restitution_coefficient,damping_coefficient; static int torn,*dev_torn; static int *torn_id,*dev_torn_id; static glm::vec3 * dev_k1_x; static glm::vec3 * dev_k1_v; static glm::vec3 * dev_k2_x; static glm::vec3 * dev_k2_v; static glm::vec3 * dev_k3_x; static glm::vec3 * dev_k3_v; static glm::vec3 * dev_k4_x; static glm::vec3 * dev_k4_v; static glm::vec3 * dev_pos_temp1,*pos_temp1; static glm::vec3 * dev_vel_temp1,*vel_temp1; static glm::vec3 * dev_external_force; static float* dev_vel_implicit; static float* dev_b_implicit; static float* dev_force_implicit; static int* dev_coo_Rows; static int* dev_csr_Rows; static int* dev_Cols; static float* dev_Val; static int dev_nnz; static int springConstraintNum; static float tear_value; static bool tearable; static glm::vec3 *dev_primitive_pos; /* helper function for matrix operation */ //matrix copy matrix v1=v2 __global__ void vector_copy_vector(glm::vec3 *v1,glm::vec3 *v2,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ v1[index]=v2[index]; } } //matrix add matrix v3=v1+v2 __global__ void vector_add_vector(glm::vec3 *v1,glm::vec3 *v2,glm::vec3 *v3, int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ v3[index]=v1[index]+v2[index]; } } //matrix add matrix times a factor v3=v1+v2*mul __global__ void vector_add_mulvector(glm::vec3 *v1,glm::vec3 *v2,glm::vec3 *v3,float mul,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ v3[index]=v1[index]+mul*v2[index]; } } //matrix minus matrix v3=v1-v2 __global__ void vector_minus_vector(glm::vec3 *v1,glm::vec3 *v2,glm::vec3 *v3,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ v3[index]=v1[index]-v2[index]; } } //matrix minus matrix times a factor v3=v1-v2*mul __global__ void vector_minus_mulvector(glm::vec3 *v1,glm::vec3 *v2,glm::vec3 *v3,float mul,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ v3[index]=v1[index]-mul*v2[index]; } } //matrix times a factor v_out=v_in*mul __global__ void vector_mul_scalar(glm::vec3 * v_out, glm::vec3 * v_in, float mul ,int N) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index<N){ v_out[index] = mul * v_in[index]; } } //matrix minus a matrix and times a factor v3=(v1-v2)*mul __global__ void vector_add_vector_mul(glm::vec3 *v1,glm::vec3 *v2,glm::vec3 *v3,float mul,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ v3[index]=(v1[index]+v2[index])*mul; } } //matrix add a matrix and times a factor v3=(v1+v2)*mul __global__ void vector_minus_vector_mul(glm::vec3 *v1,glm::vec3 *v2,glm::vec3 *v3,float mul,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; 
if(index<N){ v3[index]=(v1[index]-v2[index])*mul; } } // implicit method Ax=b compute the rhs __global__ void compute_b(float mass, float dt, float* _dev_v, float* _dev_force, float* b,int N) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index<N){ b[index] = mass*_dev_v[index] + dt*_dev_force[index]; } } __global__ void convert_2_implicit_data(glm::vec3* data_in, float* data_out, int N) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index<N){ data_out[3 * index] = data_in[index].x; data_out[3 * index + 1] = data_in[index].y; data_out[3 * index + 2] = data_in[index].z; } } __global__ void inv_convert_2_implicit_data(float* data_in, glm::vec3* data_out, int N) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index<N){ data_out[index].x = data_in[3 * index]; data_out[index].y = data_in[3 * index + 1]; data_out[index].z = data_in[3 * index + 2]; } } /* helper function for matrix operation */ /* Intersection Test */ __device__ bool CubeIntersectionTest(glm::vec3 &p, glm::vec3 &normal, glm::vec3 dim,glm::vec3 center,float &dist) { float COLLISION_EPSILON=1e-1; glm::vec3 diff = p - center; float xcollide,ycollide,zcollide; xcollide=(fabs(diff.x)-dim.x-COLLISION_EPSILON); ycollide=(fabs(diff.y)-dim.y-COLLISION_EPSILON); zcollide=(fabs(diff.z)-dim.z-COLLISION_EPSILON); if(xcollide<0&&ycollide<0&&zcollide<0){ if(xcollide>=ycollide&&xcollide>=zcollide){ if(diff.x>0){ normal=glm::vec3(1,0,0); } else{ normal=glm::vec3(-1,0,0); } dist=xcollide; return true; } else if(ycollide>=xcollide&&ycollide>=zcollide){ if(diff.y>0){ normal=glm::vec3(0,1,0); } else{ normal=glm::vec3(0,-1,0); } dist=ycollide; return true; } else if(zcollide>=ycollide&&zcollide>=xcollide){ if(diff.z>0){ normal=glm::vec3(0,0,1); } else{ normal=glm::vec3(0,0,-1); } dist=zcollide; return true; } } else { dist=1; return false; } } __device__ bool SphereIntersectionTest(glm::vec3 &p, glm::vec3 &normal,float radius, glm::vec3 center,float &dist) { float COLLISION_EPSILON=1e-1; glm::vec3 diff = p - center; dist = glm::length(diff) - radius - COLLISION_EPSILON; if (dist < 0) { normal = glm::normalize(diff); return true; } else { return false; } } __device__ bool PlaneIntersectionTest(glm::vec3 &p, glm::vec3 &normal, glm::vec3 pNormal, glm::vec3 center,float &dist) { float COLLISION_EPSILON=1e-1; float height = center[1]; dist = p.y - height - COLLISION_EPSILON; normal=pNormal; if (dist < 0) { return true; } else { return false; } } __device__ bool insideBoxOnGPU(glm::vec3 pos,kdtree *tree){ if(pos.x<=tree->xMax&&pos.x>=tree->xMin&&pos.y<=tree->yMax&&pos.y>=tree->yMin&& pos.z<=tree->zMax&&pos.z>=tree->zMin){ return true; } else return false; } __device__ void getNearbyTrianglesOnGPU(glm::vec3 pos,kdtree *tree, int *list){ int count=0,num=0,n=0; kdtree *kd[1000]; kd[count++]=tree; while(count<1000&&n!=count&&num<180){ kdtree *current=kd[n]; if(insideBoxOnGPU(pos,current)){ if(current->lc==nullptr&&current->rc==nullptr) list[num++]=current->index; else{ kd[count++]=current->lc; if(count>=1000) break; kd[count++]=current->rc; } } n++; } } __device__ glm::vec3 getNormalOnGPU(glm::vec3 *m_positions,glm::vec3 *m_normals,int *m_indices, unsigned short TriangleIndex){ glm::vec3 n1,n2,n3,v1,v2,v3,n,crossN,v12,v13; unsigned int index1,index2,index3; index1=m_indices[3*TriangleIndex]; index2=m_indices[3*TriangleIndex+1]; index3=m_indices[3*TriangleIndex+2]; v1=m_positions[index1];v2=m_positions[index2];v3=m_positions[index3]; n1=m_normals[index1];n2=m_normals[index2];n3=m_normals[index3]; v12=v1-v2;v13=v1-v3; 
v12=glm::normalize(v12);v13=glm::normalize(v13); crossN=glm::cross(v12,v13); crossN=glm::normalize(crossN); n=(n1+n2+n3); n=glm::normalize(n); if(glm::dot(n,crossN)<0) return -crossN; else return crossN; } __device__ float getDistanceOnGPU(glm::vec3 *m_positions,glm::vec3 *m_normals,int *m_indices,glm::vec3 p,unsigned short TriangleIndex){ float dis,k,x; unsigned short index; index=m_indices[3*TriangleIndex]; glm::vec3 normal=getNormalOnGPU(m_positions,m_normals,m_indices,TriangleIndex); glm::vec3 d=p-m_positions[index]; x=-(normal.x*d.x+normal.y*d.y+normal.z*d.z); //k=normal.x*normal.x+normal.y*normal.y+normal.z*normal.z; //dis=x/k; return x; } __device__ bool ObjectIntersectionTest(glm::vec3 & p, glm::vec3 & normal, kdtree *tree,glm::vec3 center,float &dist, glm::vec3 *obj_vertex,glm::vec3 *obj_normal,int *obj_indices) { // TODO float minDis=1e7; float COLLISION_EPSILON=1e-3; bool inCollision=false; glm::vec3 pos=p;//-center; int list[180]; for(int i=0;i<180;++i) list[i]=-1; getNearbyTrianglesOnGPU(pos,tree,list); pos-=center; for(int i=0;i<180;i++){ if(list[i]==-1) break; float tmp=getDistanceOnGPU(obj_vertex,obj_normal,obj_indices,pos,list[i]); if(tmp>0&&tmp<minDis&&tmp<0.1){ glm::vec3 n=getNormalOnGPU(obj_vertex,obj_normal,obj_indices,list[i]); normal=n; minDis=tmp; inCollision=true; } } dist=-minDis-COLLISION_EPSILON; return inCollision; } /* Intetsection Test */ __global__ void collisionDetectionKernel(glm::vec3 *pos,glm::vec3 *nor,float *dist,GPUPrimitive *primitive,int primitiveNum,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ dist[index]=0; nor[index]=glm::vec3(0); for(int i=0;i<primitiveNum;++i){ float d=0; glm::vec3 n(0); if(primitive[i].type==0){ if(CubeIntersectionTest(pos[index],n,primitive[i].cSize,primitive[i].pos,d)){ if (d < dist[index]){ dist[index] = d; nor[index]= n; } }//if }//if else if(primitive[i].type==1){ if(SphereIntersectionTest(pos[index],n,primitive[i].radius,primitive[i].pos,d)){ if (d < dist[index]){ dist[index] = d; nor[index]= n; } }//if } else if(primitive[i].type==2){ if(PlaneIntersectionTest(pos[index],n,primitive[i].pNormal,primitive[i].pos,d)){ if (d < dist[index]){ dist[index] = d; nor[index]= n; } }//if } else if(primitive[i].type==3){ if(ObjectIntersectionTest(pos[index],n,primitive[i].tree,primitive[i].pos,d,primitive[i].objVertex,primitive[i].objNormal,primitive[i].objIndices)){ if (d < dist[index]){ dist[index] = d; nor[index]= n; } } } } } } __global__ void collisionSolvingOnGPU(glm::vec3 *pos,glm::vec3 *vel,float *dist,glm::vec3 *nor,float restitution_coefficient,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N&&glm::length(nor[index])>0.1){ pos[index]-=nor[index]*dist[index]; float n=glm::dot(nor[index],vel[index]); vel[index]+=-(1+restitution_coefficient)*n*nor[index]; } } __global__ void addGravityOnGPU(glm::vec3 *force,float mass,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ force[index].y=-9.80f*mass; force[index].x=0; force[index].z=0; } } __global__ void PBDProjectKernel(GPUConstraint *constraint,glm::vec3 *p,int *torn,int *torn_id,float tear_value,bool tearable,int N,int ns){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ if(constraint[index].type==0&&constraint[index].active){//Attachment Constraint float k_prime=1.0-pow(1.0-constraint[index].stiffnessPBD,1.0/ns); glm::vec3 v=p[constraint[index].fix_index]; glm::vec3 dp=constraint[index].fixedPoint-v; atomicAdd(&p[constraint[index].fix_index].x,k_prime*dp.x); atomicAdd(&p[constraint[index].fix_index].y,k_prime*dp.y); 
atomicAdd(&p[constraint[index].fix_index].z,k_prime*dp.z); //p[constraint[index].fix_index]+=k_prime*dp; } else if(constraint[index].type==1&&constraint[index].active){//Spring Constraint float k_prime=1.0-pow(1.0-constraint[index].stiffnessPBD,1.0/ns); float rest_length=constraint[index].rest_length; glm::vec3 v1=p[constraint[index].p1]; glm::vec3 v2=p[constraint[index].p2]; float current_length=glm::length(v1-v2); glm::vec3 current_direction=(v1-v2)/current_length; glm::vec3 dp=(current_length-rest_length)*current_direction; atomicAdd(&p[constraint[index].p1].x,-0.5f*k_prime*dp.x); atomicAdd(&p[constraint[index].p1].y,-0.5f*k_prime*dp.y); atomicAdd(&p[constraint[index].p1].z,-0.5f*k_prime*dp.z); atomicAdd(&p[constraint[index].p2].x,0.5f*k_prime*dp.x); atomicAdd(&p[constraint[index].p2].y,0.5f*k_prime*dp.y); atomicAdd(&p[constraint[index].p2].z,0.5f*k_prime*dp.z); if(tearable&&current_length>tear_value*rest_length){ torn[0]=1; constraint[index].active=false; if(constraint[index].triangleId1!=-1) torn_id[constraint[index].triangleId1]=1; if(constraint[index].triangleId2!=-1) torn_id[constraint[index].triangleId2]=1; } //p[constraint[index].p1]-=0.5f*k_prime*dp; //p[constraint[index].p2]+=0.5f*k_prime*dp; } } } __global__ void kern_compute_force(glm::vec3* dev_force, glm::vec3* dev_pos, GPUConstraint *dev_constraint, int Num_Constraint) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index < Num_Constraint) { if (dev_constraint[index].type == 0&&dev_constraint[index].active) //attachment constraint { glm::vec3 p0 = dev_constraint[index].fixedPoint; glm::vec3 p1 = dev_pos[dev_constraint[index].fix_index]; float cur_len = glm::length(p1 - p0); float stiffness = dev_constraint[index].stiffness; glm::vec3 cur_force = stiffness*(p0 - p1); //// atomic add atomicAdd(&(dev_force[dev_constraint[index].fix_index].x), cur_force.x); atomicAdd(&(dev_force[dev_constraint[index].fix_index].y), cur_force.y); atomicAdd(&(dev_force[dev_constraint[index].fix_index].z), cur_force.z); //dev_force[dev_constraint[index].fix_index] += cur_force; } else if (dev_constraint[index].type == 1) //spring constraint { glm::vec3 p1 = dev_pos[dev_constraint[index].p1]; glm::vec3 p2 = dev_pos[dev_constraint[index].p2]; float cur_len = glm::length(p1 - p2); float stiffness = dev_constraint[index].stiffness; glm::vec3 cur_force = stiffness*(cur_len - dev_constraint[index].rest_length) / cur_len*(p2 - p1); //// atomic add atomicAdd(&(dev_force[dev_constraint[index].p1].x), cur_force.x); atomicAdd(&(dev_force[dev_constraint[index].p1].y), cur_force.y); atomicAdd(&(dev_force[dev_constraint[index].p1].z), cur_force.z); atomicAdd(&(dev_force[dev_constraint[index].p2].x), -cur_force.x); atomicAdd(&(dev_force[dev_constraint[index].p2].y), -cur_force.y); atomicAdd(&(dev_force[dev_constraint[index].p2].z), -cur_force.z); //dev_force[dev_constraint[index].p1] += cur_force; //dev_force[dev_constraint[index].p2] -= cur_force; } } } __global__ void kern_RK4_computation(glm::vec3 *dev_out, glm::vec3 *dev_k1, glm::vec3 *dev_k2, glm::vec3 *dev_k3, glm::vec3 *dev_k4, float a, int N) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index < N) { dev_out[index] = dev_out[index] + a * (dev_k1[index] + 2.f* dev_k2[index] + 2.f * dev_k3[index] + dev_k4[index]); } } void initData(){ cudaMalloc(&dev_pos,dimension*sizeof(glm::vec3)); cudaMalloc(&dev_vel,dimension*sizeof(glm::vec3)); cudaMalloc(&dev_pos_temp1, dimension*sizeof(glm::vec3)); cudaMalloc(&dev_vel_temp1, dimension*sizeof(glm::vec3)); 
cudaMalloc(&dev_force,dimension*sizeof(glm::vec3)); cudaMalloc(&dev_external_force, dimension*sizeof(glm::vec3)); cudaMalloc(&dev_pbd,dimension*sizeof(glm::vec3)); cudaMalloc(&dev_constraint,(constraintNum+100)*sizeof(GPUConstraint));//give 100 more space for additional attachment constraint cudaMalloc(&dev_primitive,primitiveNum*sizeof(GPUPrimitive)); cudaMalloc(&dev_collisionNormal,dimension*sizeof(glm::vec3)); cudaMalloc(&dev_dist,dimension*sizeof(float)); cudaMalloc(&dev_angular_momentum,dimension*sizeof(float)); cudaMalloc(&dev_inertia,dimension*sizeof(glm::mat3x3)); cudaMalloc(&dev_k1_x, dimension*sizeof(glm::vec3)); cudaMalloc(&dev_k1_v, dimension*sizeof(glm::vec3)); cudaMalloc(&dev_k2_x, dimension*sizeof(glm::vec3)); cudaMalloc(&dev_k2_v, dimension*sizeof(glm::vec3)); cudaMalloc(&dev_k3_x, dimension*sizeof(glm::vec3)); cudaMalloc(&dev_k3_v, dimension*sizeof(glm::vec3)); cudaMalloc(&dev_k4_x, dimension*sizeof(glm::vec3)); cudaMalloc(&dev_k4_v, dimension*sizeof(glm::vec3)); cudaMalloc(&dev_vel_implicit, 3 * dimension*sizeof(float)); cudaMalloc(&dev_force_implicit, 3 * dimension*sizeof(float)); cudaMalloc(&dev_b_implicit, 3 * dimension*sizeof(float)); cudaMalloc(&dev_torn_id,triangleNum*sizeof(int)); cudaMalloc(&dev_torn,sizeof(int)); cudaMalloc(&dev_primitive_pos,primitiveNum*sizeof(glm::vec3)); cudaMemcpy(dev_pos,pos,dimension*sizeof(glm::vec3),cudaMemcpyHostToDevice); cudaMemcpy(dev_vel,vel,dimension*sizeof(glm::vec3),cudaMemcpyHostToDevice); cudaMemcpy(dev_constraint,constraint,constraintNum*sizeof(GPUConstraint),cudaMemcpyHostToDevice); cudaMemcpy(dev_primitive,primitive,primitiveNum*sizeof(GPUPrimitive),cudaMemcpyHostToDevice); cudaMemset(dev_force,0,dimension*sizeof(glm::vec3)); cudaMemset(dev_external_force,0,dimension*sizeof(glm::vec3)); cudaMemset(dev_torn_id,0,triangleNum*sizeof(int)); cudaMemset(dev_torn,0,sizeof(int)); } void deleteData(){ cudaFree(dev_constraint); cudaFree(dev_primitive); cudaFree(dev_pos); cudaFree(dev_vel); cudaFree(dev_force); cudaFree(dev_external_force); cudaFree(dev_pbd); cudaFree(dev_collisionNormal); cudaFree(dev_dist); cudaFree(dev_angular_momentum); cudaFree(dev_inertia); cudaFree(dev_k1_x); cudaFree(dev_k1_v); cudaFree(dev_k2_x); cudaFree(dev_k2_v); cudaFree(dev_k3_x); cudaFree(dev_k3_v); cudaFree(dev_k4_x); cudaFree(dev_k4_v); cudaFree(dev_pos_temp1); cudaFree(dev_vel_temp1); cudaFree(dev_torn_id); cudaFree(dev_torn); cudaFree(dev_primitive_pos); delete(force); delete(pos); delete(vel); delete(pos_temp1); delete(vel_temp1); delete(angular_momentum); delete(inertia); delete(torn_id); } void copyData(GPUConstraint *GConstraint,GPUPrimitive *GPrimitive,glm::vec3 *Gpos,glm::vec3 *Gvel,int Gheight,int Gwidth ,int GconstraintNum,int GspringConstraintNum,int GprimitiveNum,int GtriangleNum,float Gmass, float Grestitution_coefficient,float Gdamping_coefficient,float Gtear_value,bool Gtearable){ constraint=GConstraint; primitive=GPrimitive; height=Gheight; width=Gwidth; dimension=height*width; constraintNum=GconstraintNum; springConstraintNum=GspringConstraintNum; primitiveNum=GprimitiveNum; triangleNum=GtriangleNum; mass=Gmass; restitution_coefficient=Grestitution_coefficient; damping_coefficient=Gdamping_coefficient; tear_value=Gtear_value; tearable=Gtearable; force=new glm::vec3[height*width]; pos=new glm::vec3[height*width]; vel=new glm::vec3[height*width]; pos_temp1=new glm::vec3[height*width]; vel_temp1=new glm::vec3[height*width]; angular_momentum=new glm::vec3[height*width]; inertia=new glm::mat3x3[height*width]; torn_id=new int[triangleNum]; 
for(int i=0;i<height*width;++i){ force[i]=glm::vec3(0); pos[i]=Gpos[i]; vel[i]=Gvel[i]; } initData(); } void calculateExternalForceoOnGPU() { cudaMemset(dev_external_force,0,dimension*sizeof(glm::vec3)); addGravityOnGPU << <(dimension + 255) / 256, 256 >> >(dev_external_force, mass, dimension); } void detectCollisionOnGPU(){ cudaMemset(dev_collisionNormal,0,dimension*sizeof(glm::vec3));//reset every time,may need stream compaction to improve cudaMemset(dev_dist,0,dimension*sizeof(float)); //cout<<primitive[1].tree->index<<endl; //if(primitive[1].tree==nullptr) cout<<"null"<<endl; collisionDetectionKernel<<<(dimension+255)/256,256>>>(dev_pos,dev_collisionNormal,dev_dist,dev_primitive,primitiveNum,dimension); } void resolveCollisionOnGPU(){ collisionSolvingOnGPU<<<(dimension+255)/256,256>>>(dev_pos,dev_vel,dev_dist,dev_collisionNormal,restitution_coefficient,dimension); cudaMemcpy(pos,dev_pos,dimension*sizeof(glm::vec3),cudaMemcpyDeviceToHost); } __global__ void calculateAngularMomentum(glm::vec3 *pos,glm::vec3 *vel,glm::vec3 tmpPos,glm::vec3 *angular_momentum, glm::mat3x3 *inertia,float mass,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ glm::vec3 r=pos[index]-tmpPos; angular_momentum[index]+=mass*glm::cross(r,vel[index]); glm::mat3x3 r_mat(1); r_mat[0][1]=r.z; r_mat[0][2]=-r.y; r_mat[1][0]=-r.z; r_mat[1][2]=r.x; r_mat[2][0]=r.y; r_mat[2][1]=-r.x; inertia[index]=r_mat*glm::transpose(r_mat)*mass; } } __global__ void calculateVelocityDamping(glm::vec3 *pos,glm::vec3 *vel,glm::vec3 tmpPos,glm::vec3 tmpVel, glm::vec3 angular_momentum,glm::mat3x3 inertia,float damping_coefficient,int N){ int index=blockDim.x*blockIdx.x+threadIdx.x; if(index<N){ glm::vec3 r=pos[index]-tmpPos; glm::vec3 angular_vel=glm::inverse(inertia)*angular_momentum; glm::vec3 delta_v=tmpVel+glm::cross(angular_vel,r)-vel[index]; vel[index]+=damping_coefficient*delta_v; } } void dampVelocityOnGPU() { if (std::abs(damping_coefficient) < 1e-15) return; cudaMemcpy(pos_temp1,dev_pos,dimension*sizeof(glm::vec3),cudaMemcpyDeviceToHost); cudaMemcpy(vel_temp1,dev_vel,dimension*sizeof(glm::vec3),cudaMemcpyDeviceToHost); thrust::inclusive_scan(pos_temp1,pos_temp1+dimension,pos_temp1); thrust::inclusive_scan(vel_temp1,vel_temp1+dimension,vel_temp1); calculateAngularMomentum<<<(dimension+255)/256,256>>>(dev_pos,dev_vel,pos_temp1[dimension-1]/(1.0f*dimension),dev_angular_momentum, dev_inertia,mass,dimension); cudaMemcpy(angular_momentum,dev_angular_momentum,dimension*sizeof(glm::vec3),cudaMemcpyDeviceToHost); cudaMemcpy(inertia,dev_inertia,dimension*sizeof(glm::mat3x3),cudaMemcpyDeviceToHost); thrust::inclusive_scan(angular_momentum,angular_momentum+dimension,angular_momentum); thrust::inclusive_scan(inertia,inertia+dimension,inertia); calculateVelocityDamping<<<(dimension+255)/256,256>>>(dev_pos,dev_vel,pos_temp1[dimension-1]/(1.0f*dimension),vel_temp1[dimension-1]/(1.0f*dimension), angular_momentum[dimension-1],inertia[dimension-1],damping_coefficient,dimension); cudaMemcpy(pos,dev_pos,dimension*sizeof(glm::vec3),cudaMemcpyDeviceToHost); } void integratePBDOnGPU(int ns,float dt) { for(int i=0;i<ns;++i){ vector_add_mulvector<<<(dimension+255)/256,256>>>(dev_vel,dev_external_force,dev_vel,dt*1.0/mass,dimension); vector_add_mulvector<<<(dimension+255)/256,256>>>(dev_pos,dev_vel,dev_pbd,dt,dimension); PBDProjectKernel<<<(constraintNum+255)/256,256>>>(dev_constraint,dev_pbd,dev_torn,dev_torn_id,tear_value,tearable,constraintNum,ns); 
vector_minus_vector_mul<<<(dimension+255)/256,256>>>(dev_pbd,dev_pos,dev_vel,1.0/(dt),dimension); vector_copy_vector<<<(dimension+255)/256,256>>>(dev_pos,dev_pbd,dimension); detectCollisionOnGPU(); resolveCollisionOnGPU(); } } //==================== integration ==================== void integrateExplicitEuler_GPU(float dt) { cudaMemset(dev_force, 0, dimension*sizeof(glm::vec3)); //compute force kern_compute_force << <(constraintNum + 255) / 256, 256 >> >(dev_force, dev_pos, dev_constraint, constraintNum); cudaDeviceSynchronize(); cudaCheckErrors("kernel fail"); //add external force vector_add_vector << <(dimension + 255) / 256, 256 >> > (dev_force,dev_external_force,dev_force,dimension); //pos vector_add_mulvector << <(dimension + 255) / 256, 256 >> > (dev_pos, dev_vel, dev_pos,dt,dimension ); //vel float dt_inv_mass = dt / mass; vector_add_mulvector << <(dimension + 255) / 256, 256 >> > (dev_vel, dev_force, dev_vel, dt_inv_mass, dimension); //clear the force mem //cudaMemset(dev_force, 0, dimension); cudaMemcpy(pos, dev_pos, dimension*sizeof(glm::vec3), cudaMemcpyDeviceToHost); } void integrateExplicitRK2_GPU(float dt) { cudaMemset(dev_force, 0, dimension*sizeof(glm::vec3)); //compute force kern_compute_force << <(constraintNum + 255) / 256, 256 >> >(dev_force, dev_pos, dev_constraint, constraintNum); cudaDeviceSynchronize(); cudaCheckErrors("kernel fail"); //add external force vector_add_vector << <(dimension + 255) / 256, 256 >> > (dev_force, dev_external_force, dev_force,dimension); //pos vector_add_mulvector << <(dimension + 255) / 256, 256 >> > (dev_pos, dev_vel, dev_pos_temp1, dt, dimension); //vel float dt_inv_mass = dt / mass; vector_add_mulvector << <(dimension + 255) / 256, 256 >> > (dev_vel, dev_force, dev_vel_temp1, dt_inv_mass, dimension); cudaMemset(dev_force,0,dimension*sizeof(glm::vec3)); kern_compute_force << <(constraintNum + 255) / 256, 256 >> >(dev_force, dev_pos_temp1, dev_constraint, constraintNum); //add external force vector_add_vector << <(dimension + 255) / 256, 256 >> > (dev_force, dev_external_force, dev_force,dimension); //pos vector_add_mulvector << <(dimension + 255) / 256, 256 >> > (dev_pos, dev_vel_temp1, dev_pos, dt, dimension); //vel vector_add_mulvector << <(dimension + 255) / 256, 256 >> > (dev_vel, dev_force, dev_vel, dt_inv_mass, dimension); //clear the force mem //cudaMemset(dev_force, 0, dimension); cudaMemcpy(pos, dev_pos, dimension*sizeof(glm::vec3), cudaMemcpyDeviceToHost); } void integrateExplicitRK4_GPU(float dt) { float half_dt = dt / 2; float inv_mass = 1.f / mass; //step1 cudaMemset(dev_force, 0, dimension*sizeof(glm::vec3)); //compute force kern_compute_force << <(constraintNum + 255) / 256, 256 >> >(dev_force, dev_pos, dev_constraint, constraintNum); cudaDeviceSynchronize(); //add external force vector_add_vector << <(dimension + 255) / 256, 256 >> > (dev_force, dev_external_force, dev_force,dimension); cudaMemcpy(dev_k1_x, dev_vel, dimension*sizeof(glm::vec3), cudaMemcpyDeviceToDevice); vector_mul_scalar << <(constraintNum + 255) / 256, 256 >> >(dev_k1_v, dev_force, inv_mass, dimension); vector_add_mulvector << <(constraintNum + 255) / 256, 256 >> >(dev_pos, dev_k1_x, dev_pos_temp1, half_dt, dimension); vector_add_mulvector << <(constraintNum + 255) / 256, 256 >> >(dev_vel, dev_k1_v, dev_vel_temp1, half_dt, dimension); //step 2 cudaMemset(dev_force, 0, dimension*sizeof(glm::vec3)); //compute force kern_compute_force << <(constraintNum + 255) / 256, 256 >> >(dev_force, dev_pos_temp1, dev_constraint, constraintNum); 
cudaDeviceSynchronize(); //add external force vector_add_vector << <(dimension + 255) / 256, 256 >> > (dev_force, dev_external_force, dev_force,dimension); cudaMemcpy(dev_k2_x, dev_vel_temp1, dimension*sizeof(glm::vec3), cudaMemcpyDeviceToDevice); vector_mul_scalar << <(constraintNum + 255) / 256, 256 >> >(dev_k2_v, dev_force, inv_mass, dimension); vector_add_mulvector << <(constraintNum + 255) / 256, 256 >> >(dev_pos, dev_k2_x, dev_pos_temp1, half_dt, dimension); vector_add_mulvector << <(constraintNum + 255) / 256, 256 >> >(dev_vel, dev_k2_v, dev_vel_temp1, half_dt, dimension); //step3 cudaMemset(dev_force, 0, dimension*sizeof(glm::vec3)); //compute force kern_compute_force << <(constraintNum + 255) / 256, 256 >> >(dev_force, dev_pos_temp1, dev_constraint, constraintNum); cudaDeviceSynchronize(); //add external force vector_add_vector << <(dimension + 255) / 256, 256 >> > (dev_force, dev_external_force, dev_force,dimension); cudaMemcpy(dev_k3_x, dev_vel_temp1, dimension*sizeof(glm::vec3), cudaMemcpyDeviceToDevice); vector_mul_scalar << <(constraintNum + 255) / 256, 256 >> >(dev_k3_v, dev_force, inv_mass, dimension); vector_add_mulvector << <(constraintNum + 255) / 256, 256 >> >(dev_pos, dev_k2_x, dev_pos_temp1, dt, dimension); vector_add_mulvector << <(constraintNum + 255) / 256, 256 >> >(dev_vel, dev_k2_v, dev_vel_temp1, dt, dimension); //step4 cudaMemset(dev_force, 0, dimension*sizeof(glm::vec3)); //compute force kern_compute_force << <(constraintNum + 255) / 256, 256 >> >(dev_force, dev_pos_temp1, dev_constraint, constraintNum); cudaDeviceSynchronize(); //add external force vector_add_vector << <(dimension + 255) / 256, 256 >> > (dev_force, dev_external_force, dev_force,dimension); cudaMemcpy(dev_k4_x, dev_vel_temp1, dimension*sizeof(glm::vec3), cudaMemcpyDeviceToDevice); vector_mul_scalar << <(constraintNum + 255) / 256, 256 >> >(dev_k4_v, dev_force, inv_mass, dimension); //all together float a = dt / 6.f; kern_RK4_computation << <(constraintNum + 255) / 256, 256 >> >(dev_pos, dev_k1_x, dev_k2_x, dev_k3_x, dev_k4_x, a, dimension); kern_RK4_computation << <(constraintNum + 255) / 256, 256 >> >(dev_vel, dev_k1_v, dev_k2_v, dev_k3_v, dev_k4_v, a, dimension); cudaMemcpy(pos, dev_pos, dimension*sizeof(glm::vec3), cudaMemcpyDeviceToHost); } void integrateImplicitBW_GPU(float dt) { cudaMemset(dev_force, 0, dimension*sizeof(glm::vec3)); //compute force kern_compute_force << <(constraintNum + 255) / 256, 256 >> >(dev_force, dev_pos, dev_constraint, constraintNum); cudaDeviceSynchronize(); //add external force vector_add_vector << <(dimension + 255) / 256, 256 >> > (dev_force, dev_external_force, dev_force, dimension); //convert to implicit data convert_2_implicit_data << <(dimension + 255) / 256, 256 >> > (dev_vel,dev_vel_implicit,dimension); convert_2_implicit_data << <(dimension + 255) / 256, 256 >> > (dev_force, dev_force_implicit, dimension); compute_b << <(3 * dimension + 255) / 256, 256 >> > (mass, dt, dev_vel_implicit, dev_force_implicit, dev_b_implicit, 3 * dimension); // --- create library handles: cusolverSpHandle_t cusolver_handle; cusolverStatus_t cusolver_status; cusolver_status = cusolverSpCreate(&cusolver_handle); //std::cout << "status create cusolver handle: " << cusolver_status << std::endl; cusparseHandle_t cusparse_handle; cusparseStatus_t cusparse_status; cusparse_status = cusparseCreate(&cusparse_handle); //std::cout << "status create cusparse handle: " << cusparse_status << std::endl; cusparseMatDescr_t descrA; cusparse_status = cusparseCreateMatDescr(&descrA); 
//std::cout << "status cusparse createMatDescr: " << cusparse_status << std::endl; //solving float tol = 1e-3; int reorder = 0; int singularity = 0; //std::cout << dev_nnz << std::endl; cusolver_status = cusolverSpScsrlsvchol(cusolver_handle, 3 * dimension, dev_nnz, descrA, dev_Val, dev_csr_Rows, dev_Cols, dev_b_implicit, tol, reorder, dev_vel_implicit, &singularity); cudaDeviceSynchronize(); //std::cout << "singularity (should be -1): " << singularity << std::endl; //std::cout << "status cusolver solving (!): " << cusolver_status << std::endl; // relocated these 2 lines from above to solve (2): cusparse_status = cusparseDestroy(cusparse_handle); //std::cout << "status destroy cusparse handle: " << cusparse_status << std::endl; cusolver_status = cusolverSpDestroy(cusolver_handle); //std::cout << "status destroy cusolver handle: " << cusolver_status << std::endl; //convert the data back inv_convert_2_implicit_data << <(dimension + 255) / 256, 256 >> > (dev_vel_implicit, dev_vel, dimension); vector_add_mulvector << <(dimension + 255) / 256, 256 >> >(dev_pos, dev_vel, dev_pos, dt, dimension); } //==================== integration ==================== kdtree *initTree(kdtree *root){ //postorder method to first get the left and right child on GPU Memory, then replace it with the memory on CPU, then copy the whole point to GPU if(root==nullptr) return nullptr; kdtree *dev_lc=initTree(root->lc); kdtree *dev_rc=initTree(root->rc); kdtree *tmp=new kdtree(root); tmp->lc=dev_lc; tmp->rc=dev_rc; kdtree *dev_root; cudaMalloc(&dev_root,sizeof(kdtree)); cudaMemcpy(dev_root,tmp,sizeof(kdtree),cudaMemcpyHostToDevice); return dev_root; } void updateAttachmentConstraintOnGPU(GPUConstraint *Gconstraint,int n){ cudaMemset(dev_constraint+springConstraintNum,0,100*sizeof(GPUConstraint)); n=min(100,n);//no more than 100 Attachment Constraint cudaMemcpy(dev_constraint+springConstraintNum,Gconstraint,n*sizeof(GPUConstraint),cudaMemcpyHostToDevice); constraintNum=springConstraintNum+n; } void convertSystemMatrix(std::vector<int> &host_Rows, std::vector<int> &host_Cols, std::vector<float> &host_Val) { //step1 convert to coo format int nnz = host_Val.size(); dev_nnz = nnz; cudaMalloc((void**)&dev_Val, nnz*sizeof(float)); cudaMalloc((void**)&dev_coo_Rows, nnz*sizeof(int)); cudaMalloc((void**)&dev_csr_Rows, (dimension*3 + 1)*sizeof(int)); cudaMalloc((void**)&dev_Cols, nnz*sizeof(int)); cudaMemcpy(dev_Val, host_Val.data(), nnz*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_coo_Rows, host_Rows.data(), nnz*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_Cols, host_Cols.data(), nnz*sizeof(int), cudaMemcpyHostToDevice); //std::vector<float> hst_rows(nnz, 0); //cudaMemcpy(hst_rows.data(), dev_Val, nnz*sizeof(float), cudaMemcpyDeviceToHost); //for (int i = 0; i < 10; i++) //{ // std::cout << hst_rows[i] << std::endl; //} cusparseHandle_t cusparse_handle; cusparseStatus_t cusparse_status; cusparse_status = cusparseCreate(&cusparse_handle); std::cout << "status create cusparse handle: " << cusparse_status << std::endl; cusparse_status = cusparseXcoo2csr(cusparse_handle, dev_coo_Rows, nnz, dimension*3, dev_csr_Rows, CUSPARSE_INDEX_BASE_ZERO); std::cout << "status cusparse coo2csr conversion: " << cusparse_status << std::endl; cudaDeviceSynchronize(); // matrix format conversion has to be finished! 
//check the matrix //cusparseMatDescr_t descrA; //cusparse_status = cusparseCreateMatDescr(&descrA); //std::cout << "status cusparse createMatDescr: " << cusparse_status << std::endl; // //std::vector<float> A(dimension * 3 * dimension * 3, 0); //float *dA; //cudaMalloc((void**)&dA, A.size()*sizeof(float)); //cusparseScsr2dense(cusparse_handle, dimension * 3, dimension * 3, descrA, dev_Val, // dev_csr_Rows, dev_Cols, dA, dimension * 3); //cudaMemcpy(A.data(), dA, A.size()*sizeof(float), cudaMemcpyDeviceToHost); //std::cout << "A: \n"; //for (int i = 0; i < 10; ++i) { // for (int j = 0; j < 10; ++j) { // std::cout << A[i*dimension * 3 + j] << " "; // } // std::cout << std::endl; //} //cudaFree(dA); cusparse_status = cusparseDestroy(cusparse_handle); std::cout << "status destroy cusparse handle: " << cusparse_status << std::endl; } glm::vec3 *getPos(){ return pos; } glm::vec3 *getVel(){ return vel; } bool isTorn(){ cudaMemcpy(&torn,dev_torn,sizeof(int),cudaMemcpyDeviceToHost); if(torn==1){ cudaMemset(dev_torn,0,sizeof(int)); return true; } return false; } int *getTornId(){ cudaMemcpy(torn_id,dev_torn_id,triangleNum*sizeof(int),cudaMemcpyDeviceToHost); return torn_id; } void resetTornFlag(){ torn=0; cudaMemset(dev_torn,0,sizeof(int)); } __global__ void updatePrimitivePostionOnGPU(GPUPrimitive *primitive,glm::vec3 *newPos,int N){ int index=blockIdx.x*blockDim.x+threadIdx.x; if(index<N){ primitive[index].pos=newPos[index]; } } void updatePrimitivePosition(glm::vec3 *newPos){ cudaMemcpy(dev_primitive_pos,newPos,primitiveNum*sizeof(glm::vec3),cudaMemcpyHostToDevice); updatePrimitivePostionOnGPU<<<(primitiveNum+255)/256,256>>>(dev_primitive,dev_primitive_pos,primitiveNum); } /* test cuda core function */ __global__ void test(int *a,int *b,int *c,int N){ int index=blockIdx.x*blockDim.x+threadIdx.x; if(index<N){ c[index]=a[index]+b[index]; } } /* test function for cuda setup */ void testCuda(){ int *a,*b,*c; int *dev_a,*dev_b,*dev_c; a=new int[10]; b=new int[10]; c=new int[10]; for(int i=0;i<10;++i){ a[i]=i; b[i]=10-i; } cudaMalloc(&dev_a,10*sizeof(int)); cudaMalloc(&dev_b,10*sizeof(int)); cudaMalloc(&dev_c,10*sizeof(int)); cudaMemcpy(dev_a,a,10*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_b,b,10*sizeof(int),cudaMemcpyHostToDevice); test<<<1,256>>>(dev_a,dev_b,dev_c,10); cudaMemcpy(c,dev_c,10*sizeof(int),cudaMemcpyDeviceToHost); for(int i=0;i<10;++i){ std::cout<<a[i]<<","<<b[i]<<","<<c[i]<<std::endl; } }
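The implicit Backward-Euler path above (convertSystemMatrix plus integrateImplicitBW_GPU) leans on two library calls: cusparseXcoo2csr to turn COO row indices into CSR row pointers, and cusolverSpScsrlsvchol to solve the SPD system on the device. The standalone sketch below exercises the same two calls on a tiny hand-made 3x3 SPD system; the matrix, right-hand side, and tolerance are example values, not data from the cloth solver.

#include <cstdio>
#include <cuda_runtime.h>
#include <cusparse.h>
#include <cusolverSp.h>
// build (example): nvcc coo_csr_chol.cu -lcusparse -lcusolver

int main() {
    // Example SPD matrix (made-up data):
    //     | 4 1 0 |
    // A = | 1 5 2 |,  b = (1, 2, 3)^T
    //     | 0 2 6 |
    const int m = 3, nnz = 7;
    int   cooRows[nnz] = {0, 0, 1, 1, 1, 2, 2};
    int   cols[nnz]    = {0, 1, 0, 1, 2, 1, 2};
    float vals[nnz]    = {4, 1, 1, 5, 2, 2, 6};
    float b[m]         = {1, 2, 3};

    int *dCooRows, *dCsrRows, *dCols;
    float *dVals, *dB, *dX;
    cudaMalloc(&dCooRows, nnz * sizeof(int));
    cudaMalloc(&dCsrRows, (m + 1) * sizeof(int));
    cudaMalloc(&dCols, nnz * sizeof(int));
    cudaMalloc(&dVals, nnz * sizeof(float));
    cudaMalloc(&dB, m * sizeof(float));
    cudaMalloc(&dX, m * sizeof(float));
    cudaMemcpy(dCooRows, cooRows, nnz * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dCols, cols, nnz * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dVals, vals, nnz * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, b, m * sizeof(float), cudaMemcpyHostToDevice);

    // COO row indices -> CSR row pointers (same call the cloth code uses).
    cusparseHandle_t sp;
    cusparseCreate(&sp);
    cusparseXcoo2csr(sp, dCooRows, nnz, m, dCsrRows, CUSPARSE_INDEX_BASE_ZERO);

    // Sparse Cholesky solve of A x = b on the device.
    cusolverSpHandle_t sol;
    cusolverSpCreate(&sol);
    cusparseMatDescr_t descrA;
    cusparseCreateMatDescr(&descrA);
    cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
    cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO);
    int singularity = -1;
    cusolverSpScsrlsvchol(sol, m, nnz, descrA, dVals, dCsrRows, dCols,
                          dB, 1e-6f, 0 /*reorder*/, dX, &singularity);

    float x[m];
    cudaMemcpy(x, dX, m * sizeof(float), cudaMemcpyDeviceToHost);
    printf("singularity=%d  x = %f %f %f\n", singularity, x[0], x[1], x[2]);

    cusparseDestroyMatDescr(descrA);
    cusolverSpDestroy(sol);
    cusparseDestroy(sp);
    cudaFree(dCooRows); cudaFree(dCsrRows); cudaFree(dCols);
    cudaFree(dVals); cudaFree(dB); cudaFree(dX);
    return 0;
}

As in the cloth code, the full symmetric matrix is supplied with a general descriptor and the solver reports a singularity index of -1 when the factorization succeeds.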
38a607a4ade90176a388617a9a192a0f0fa06896.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../parallel/common/unified_memory_management.cuh" #include "../parallel/common/statistics.h" double river_heads[] = RIVER_HEADS; __global__ void simulation_step_kernel(struct CA d_ca, double *d_write_head, double river_head) { unsigned idx_x = blockIdx.x * blockDim.x + threadIdx.x; unsigned idx_y = blockIdx.y * blockDim.y + threadIdx.y; unsigned idx_g = idx_y * COLS + idx_x; double Q, diff_head, tmp_t, ht1, ht2; if (idx_x < COLS && idx_y < ROWS) { if (idx_y != 0 && idx_y != ROWS - 1) { Q = 0; if (idx_x >= 1) { diff_head = d_ca.heads[idx_g - 1] - d_ca.heads[idx_g]; tmp_t = d_ca.K[idx_g] * THICKNESS; Q += diff_head * tmp_t; } if (idx_y >= 1) { diff_head = d_ca.heads[(idx_y - 1) * COLS + idx_x] - d_ca.heads[idx_g]; tmp_t = d_ca.K[idx_g] * THICKNESS; Q += diff_head * tmp_t; } if (idx_x + 1 < COLS) { diff_head = d_ca.heads[idx_g + 1] - d_ca.heads[idx_g]; tmp_t = d_ca.K[idx_g] * THICKNESS; Q += diff_head * tmp_t; } if (idx_y + 1 < ROWS) { diff_head = d_ca.heads[(idx_y + 1) * COLS + idx_x] - d_ca.heads[idx_g]; tmp_t = d_ca.K[idx_g] * THICKNESS; Q += diff_head * tmp_t; } if (idx_y == RIVER_POSITION) { double first_term_Q = (KSB * CELL_SIZE_X * W) / M; if (d_ca.heads[idx_g] > RIVER_BOTTOM) { Q += first_term_Q * (river_head - d_ca.heads[idx_g]); } else { Q += first_term_Q * (river_head - RIVER_BOTTOM + M); } } Q -= d_ca.sources[idx_g]; ht1 = Q * DELTA_T; ht2 = AREA * d_ca.Sy[idx_g]; d_write_head[idx_g] = d_ca.heads[idx_g] + ht1 / ht2; } } } void perform_simulation_on_GPU() { dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE); const int blockCount = ceil((ROWS * COLS) / (BLOCK_SIZE * BLOCK_SIZE)); double gridSize = ceil(sqrt(blockCount)); dim3 gridDim(gridSize, gridSize); int day_counter, steps_in_current_day = 0; double river_head; for (int i = 0; i < SIMULATION_STEPS; i++ && steps_in_current_day++) { river_head = river_heads[day_counter]; simulation_step_kernel << < gridDim, blockDim >> > (d_read, d_write.heads, river_head); bool is_new_day = steps_in_current_day * DELTA_T >= SECONDS_IN_DAY; if (is_new_day) { day_counter++; is_new_day = false; steps_in_current_day = 0; if (WRITE_OUTPUT_TO_FILE) { saveRiverHeadsInFile(d_write.heads, river_head, day_counter); } } hipDeviceSynchronize(); double *tmp1 = d_write.heads; d_write.heads = d_read.heads; d_read.heads = tmp1; } if (WRITE_OUTPUT_TO_FILE) { day_counter++; saveRiverHeadsInFile(d_write.heads, river_head, day_counter); } } int main() { allocate_memory(); init_read_ca(); init_write_head(); perform_simulation_on_GPU(); return 0; }
38a607a4ade90176a388617a9a192a0f0fa06896.cu
#include "../parallel/common/unified_memory_management.cuh" #include "../parallel/common/statistics.h" double river_heads[] = RIVER_HEADS; __global__ void simulation_step_kernel(struct CA d_ca, double *d_write_head, double river_head) { unsigned idx_x = blockIdx.x * blockDim.x + threadIdx.x; unsigned idx_y = blockIdx.y * blockDim.y + threadIdx.y; unsigned idx_g = idx_y * COLS + idx_x; double Q, diff_head, tmp_t, ht1, ht2; if (idx_x < COLS && idx_y < ROWS) { if (idx_y != 0 && idx_y != ROWS - 1) { Q = 0; if (idx_x >= 1) { diff_head = d_ca.heads[idx_g - 1] - d_ca.heads[idx_g]; tmp_t = d_ca.K[idx_g] * THICKNESS; Q += diff_head * tmp_t; } if (idx_y >= 1) { diff_head = d_ca.heads[(idx_y - 1) * COLS + idx_x] - d_ca.heads[idx_g]; tmp_t = d_ca.K[idx_g] * THICKNESS; Q += diff_head * tmp_t; } if (idx_x + 1 < COLS) { diff_head = d_ca.heads[idx_g + 1] - d_ca.heads[idx_g]; tmp_t = d_ca.K[idx_g] * THICKNESS; Q += diff_head * tmp_t; } if (idx_y + 1 < ROWS) { diff_head = d_ca.heads[(idx_y + 1) * COLS + idx_x] - d_ca.heads[idx_g]; tmp_t = d_ca.K[idx_g] * THICKNESS; Q += diff_head * tmp_t; } if (idx_y == RIVER_POSITION) { double first_term_Q = (KSB * CELL_SIZE_X * W) / M; if (d_ca.heads[idx_g] > RIVER_BOTTOM) { Q += first_term_Q * (river_head - d_ca.heads[idx_g]); } else { Q += first_term_Q * (river_head - RIVER_BOTTOM + M); } } Q -= d_ca.sources[idx_g]; ht1 = Q * DELTA_T; ht2 = AREA * d_ca.Sy[idx_g]; d_write_head[idx_g] = d_ca.heads[idx_g] + ht1 / ht2; } } } void perform_simulation_on_GPU() { dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE); const int blockCount = ceil((ROWS * COLS) / (BLOCK_SIZE * BLOCK_SIZE)); double gridSize = ceil(sqrt(blockCount)); dim3 gridDim(gridSize, gridSize); int day_counter, steps_in_current_day = 0; double river_head; for (int i = 0; i < SIMULATION_STEPS; i++ && steps_in_current_day++) { river_head = river_heads[day_counter]; simulation_step_kernel << < gridDim, blockDim >> > (d_read, d_write.heads, river_head); bool is_new_day = steps_in_current_day * DELTA_T >= SECONDS_IN_DAY; if (is_new_day) { day_counter++; is_new_day = false; steps_in_current_day = 0; if (WRITE_OUTPUT_TO_FILE) { saveRiverHeadsInFile(d_write.heads, river_head, day_counter); } } cudaDeviceSynchronize(); double *tmp1 = d_write.heads; d_write.heads = d_read.heads; d_read.heads = tmp1; } if (WRITE_OUTPUT_TO_FILE) { day_counter++; saveRiverHeadsInFile(d_write.heads, river_head, day_counter); } } int main() { allocate_memory(); init_read_ca(); init_write_head(); perform_simulation_on_GPU(); return 0; }
scan_v1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "scan.h" //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// /* cuda parallelized prefix sum !!! */ __global__ void scan_v1_kernel(float *d_output, float *d_input, int length) { int idx = blockDim.x * blockIdx.x + threadIdx.x; float element = 0.f; for (int offset = 0; offset < length; offset++) { if (idx - offset >= 0) element += d_input[idx - offset]; } d_output[idx] = element; } //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// void scan_v1(float *d_output, float *d_input, int length) { dim3 dimBlock(BLOCK_DIM); dim3 dimGrid((length + BLOCK_DIM - 1) / BLOCK_DIM); hipLaunchKernelGGL(( scan_v1_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_output, d_input, length); }
scan_v1.cu
#include "scan.h" //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// /* cuda parallelized prefix sum !!! */ __global__ void scan_v1_kernel(float *d_output, float *d_input, int length) { int idx = blockDim.x * blockIdx.x + threadIdx.x; float element = 0.f; for (int offset = 0; offset < length; offset++) { if (idx - offset >= 0) element += d_input[idx - offset]; } d_output[idx] = element; } //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// void scan_v1(float *d_output, float *d_input, int length) { dim3 dimBlock(BLOCK_DIM); dim3 dimGrid((length + BLOCK_DIM - 1) / BLOCK_DIM); scan_v1_kernel<<<dimGrid, dimBlock>>>(d_output, d_input, length); }
6f096e8e1028ecb486aa552e60c5bdf9b6bd6495.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <helpers/DebugHelper.h> #include <loops/legacy_ops.h> #include <loops/transform_strict.h> #include <system/Environment.h> #include <system/op_boilerplate.h> #include <types/types.h> using namespace simdOps; template <typename X, typename OpType> SD_KERNEL void transformStrictSimple(const void *x, const sd::LongType *xShapeInfo, int xRank, void *params, void *z, const sd::LongType *zShapeInfo, int zRank, int *allocationPointer, void *reductionPointer, const sd::LongType *tadShapeInfo, const sd::LongType *tadOffsets) { functions::transform::TransformStrict<X>::template transformCuda<OpType>( x, xShapeInfo, params, z, zShapeInfo, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); } namespace functions { namespace transform { template <typename X> SD_HOST void TransformStrict<X>::executeTransformShaped(dim3 launchDims, hipStream_t *stream, const int opNum, const void *x, const sd::LongType *xShape, int xRank, void *extraParams, void *z, const sd::LongType *zShape, int zRank, int *allocationPointer, void *reductionPointer, const sd::LongType *tadShapeInfo, const sd::LongType *tadOffsets) { DISPATCH_BY_OPNUM_T(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_STRICT_OPS); DEBUG_KERNEL(stream, opNum); } template <typename X> template <typename OpType> SD_DEVICE void TransformStrict<X>::transformCuda(const void *vx, const sd::LongType *xShapeInfo, void *vparams, void *vz, const sd::LongType *zShapeInfo, int *allocationPointer, void *vreductionPointer, const sd::LongType *tadShapeInfo, const sd::LongType *tadOffsets) { auto x = static_cast<const X *>(vx); auto z = static_cast<X *>(vz); auto params = static_cast<X *>(vparams); auto reductionPointer = static_cast<X *>(vreductionPointer); if (OpType::requiresSpecial) { OpType::execSpecialCuda(x, xShapeInfo, z, zShapeInfo, params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); return; } else { __shared__ sd::LongType xEws; __shared__ sd::LongType zEws; __shared__ char xOrder; __shared__ char zOrder; __shared__ sd::LongType length; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); zEws = shape::elementWiseStride(zShapeInfo); xOrder = shape::order(xShapeInfo); zOrder = shape::order(zShapeInfo); length = shape::length(xShapeInfo); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; if (xEws > 0 && zEws > 0 && xOrder == zOrder && xOrder == 'c') { for (int i = tid; i < length; i += totalThreads) z[i 
* zEws] = OpType::op(x[i * xEws], params); } else { if (vx == vz) { for (sd::LongType i = tid; i < length; i += totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); z[xOffset] = OpType::op(x[xOffset], params); } } else { for (sd::LongType i = tid; i < length; i += totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); auto zOffset = shape::getIndexOffset(i, zShapeInfo); z[zOffset] = OpType::op(x[xOffset], params); } } } } }; template <typename X> template <typename OpType> SD_HOST void TransformStrict<X>::intermediateShaped(dim3 launchDims, hipStream_t *stream, const void *x, const sd::LongType *xShape, int xRank, void *extraParams, void *z, const sd::LongType *zShape, int zRank, int *allocationPointer, void *reductionPointer, const sd::LongType *tadShapeInfo, const sd::LongType *tadOffsets) { hipLaunchKernelGGL(( transformStrictSimple<X, OpType>), dim3(launchDims.x), dim3(launchDims.x), launchDims.z, *stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); sd::DebugHelper::checkErrorCode(stream, "transformStrict(...) failed"); } BUILD_SINGLE_TEMPLATE(template class TransformStrict, , SD_FLOAT_TYPES); } // namespace transform } // namespace functions
6f096e8e1028ecb486aa552e60c5bdf9b6bd6495.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <helpers/DebugHelper.h> #include <loops/legacy_ops.h> #include <loops/transform_strict.h> #include <system/Environment.h> #include <system/op_boilerplate.h> #include <types/types.h> using namespace simdOps; template <typename X, typename OpType> SD_KERNEL void transformStrictSimple(const void *x, const sd::LongType *xShapeInfo, int xRank, void *params, void *z, const sd::LongType *zShapeInfo, int zRank, int *allocationPointer, void *reductionPointer, const sd::LongType *tadShapeInfo, const sd::LongType *tadOffsets) { functions::transform::TransformStrict<X>::template transformCuda<OpType>( x, xShapeInfo, params, z, zShapeInfo, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); } namespace functions { namespace transform { template <typename X> SD_HOST void TransformStrict<X>::executeTransformShaped(dim3 launchDims, cudaStream_t *stream, const int opNum, const void *x, const sd::LongType *xShape, int xRank, void *extraParams, void *z, const sd::LongType *zShape, int zRank, int *allocationPointer, void *reductionPointer, const sd::LongType *tadShapeInfo, const sd::LongType *tadOffsets) { DISPATCH_BY_OPNUM_T(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_STRICT_OPS); DEBUG_KERNEL(stream, opNum); } template <typename X> template <typename OpType> SD_DEVICE void TransformStrict<X>::transformCuda(const void *vx, const sd::LongType *xShapeInfo, void *vparams, void *vz, const sd::LongType *zShapeInfo, int *allocationPointer, void *vreductionPointer, const sd::LongType *tadShapeInfo, const sd::LongType *tadOffsets) { auto x = static_cast<const X *>(vx); auto z = static_cast<X *>(vz); auto params = static_cast<X *>(vparams); auto reductionPointer = static_cast<X *>(vreductionPointer); if (OpType::requiresSpecial) { OpType::execSpecialCuda(x, xShapeInfo, z, zShapeInfo, params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); return; } else { __shared__ sd::LongType xEws; __shared__ sd::LongType zEws; __shared__ char xOrder; __shared__ char zOrder; __shared__ sd::LongType length; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); zEws = shape::elementWiseStride(zShapeInfo); xOrder = shape::order(xShapeInfo); zOrder = shape::order(zShapeInfo); length = shape::length(xShapeInfo); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; if (xEws > 0 && zEws > 0 && xOrder == zOrder && xOrder == 'c') { for (int i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params); } else { if (vx == vz) { for (sd::LongType 
i = tid; i < length; i += totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); z[xOffset] = OpType::op(x[xOffset], params); } } else { for (sd::LongType i = tid; i < length; i += totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); auto zOffset = shape::getIndexOffset(i, zShapeInfo); z[zOffset] = OpType::op(x[xOffset], params); } } } } }; template <typename X> template <typename OpType> SD_HOST void TransformStrict<X>::intermediateShaped(dim3 launchDims, cudaStream_t *stream, const void *x, const sd::LongType *xShape, int xRank, void *extraParams, void *z, const sd::LongType *zShape, int zRank, int *allocationPointer, void *reductionPointer, const sd::LongType *tadShapeInfo, const sd::LongType *tadOffsets) { transformStrictSimple<X, OpType><<<launchDims.x, launchDims.x, launchDims.z, *stream>>>( x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); sd::DebugHelper::checkErrorCode(stream, "transformStrict(...) failed"); } BUILD_SINGLE_TEMPLATE(template class TransformStrict, , SD_FLOAT_TYPES); } // namespace transform } // namespace functions
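TransformStrict::transformCuda above is built around a grid-stride loop: each thread starts at its global id and advances by gridDim.x * blockDim.x, taking a fast path when both buffers are contiguous 'c'-order and otherwise resolving offsets through the shape info. The sketch below shows just that loop shape on a flat float array, with tanhf standing in for OpType::op; it deliberately leaves out libnd4j's shape machinery and is not the library's API.

#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

// Minimal grid-stride "strict transform": every thread walks the array with a
// stride of gridDim.x * blockDim.x, mirroring the ews > 0, 'c'-order fast path
// of TransformStrict::transformCuda (shape-info indirection omitted).
__global__ void tanhTransform(const float *x, float *z, int length)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int totalThreads = gridDim.x * blockDim.x;
    for (int i = tid; i < length; i += totalThreads)
        z[i] = tanhf(x[i]);
}

int main()
{
    const int n = 1 << 20;
    float *x, *z;
    cudaMallocManaged(&x, n * sizeof(float));
    cudaMallocManaged(&z, n * sizeof(float));
    for (int i = 0; i < n; i++) x[i] = 0.001f * i;

    // A deliberately small grid: the stride loop still covers all n elements.
    tanhTransform<<<64, 256>>>(x, z, n);
    cudaDeviceSynchronize();

    printf("z[0]=%f  z[1000]=%f  z[%d]=%f\n", z[0], z[1000], n - 1, z[n - 1]);
    cudaFree(x);
    cudaFree(z);
    return 0;
}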
ac7f4d4b8505038fca0eaa3c485709429278469e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <GL/gl.h> #include <GL/glut.h> #include <math.h> #include <stdbool.h> #include <omp.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include <hip/hip_runtime.h> #define PI 3.141592653589793 #define cap 1000 #define ref 0.4 #define temp 4273 #define visc 9 #define GRAV (6.674*0.00000000000000000001) #define density (2.5 * 1000000000000) #define rad 20 #define dev 12 #define M (4 / 3 * PI * rad*rad*rad* density) #define X 0 #define Y 1 #define Z 2 #define ANIM 1000000 #define scale 0.01 #define colmargin 1.05 #define R (rad * scale) #define INIT_WIDTH 800 #define INIT_HEIGHT 800 #define vision 40 #define Grid_x 1 //block #define Grid_y 1 #define Grid_z 1 #define Block_x 64 #define Block_y 8 #define Block_z 1 #define NUM_POINTS (Grid_x*Grid_y*Grid_z*Block_x*Block_y*Block_z) unsigned int num_points = (dev + 1) * (dev + 1); unsigned int window_width = INIT_WIDTH; unsigned int window_height = INIT_HEIGHT; double vision_size = vision; float right_motion=0; float up_motion=0; double left, right, bottom, top; float h_point[NUM_POINTS][3]; float v_point[NUM_POINTS][3]; float st_point[NUM_POINTS]; float e_point[NUM_POINTS]; float T_point[NUM_POINTS]; float J_point[NUM_POINTS]; float anim_time = ANIM; float anim_dt = 0.1; double phi = 30.0; double theta = 30.0; float light_pos[4]; int mouse_old_x, mouse_old_y; bool motion_p; bool motion_w; double eye[3]; double center[3] = {0.0, 0.0, 0.0}; double up[3]; double ** point; float (*d_point)[3]; float (*dv_point)[3]; float (*dst_point); float (*de_point); float (*dT_point); float (*dJ_point); __global__ void grav_v(float (*pos)[3], float(*vec)[3] ,float(*sti),float(*e),float(*T),float(*J), float time, float dt); __global__ void grav_p(float (*pos)[3], float(*vec)[3] , float time, float dt); // double dot(double vec0[], double vec1[]) { return(vec0[X] * vec1[X] + vec0[Y] * vec1[Y] + vec0[Z] * vec1[Z]); } void cross(double vec0[], double vec1[], double vec2[]) { vec2[X] = vec0[Y] * vec1[Z] - vec0[Z] * vec1[Y]; vec2[Y] = vec0[Z] * vec1[X] - vec0[X] * vec1[Z]; vec2[Z] = vec0[X] * vec1[Y] - vec0[Y] * vec1[X]; } void normVec(double vec[]) { double norm; norm = sqrt(vec[X] * vec[X] + vec[Y] * vec[Y] + vec[Z] * vec[Z]); vec[X] /= norm; vec[Y] /= norm; vec[Z] /= norm; } void normal(double p0[], double p1[], double p2[], double normal[]) { unsigned int i; double v0[3], v1[3]; for (i = 0; i < 3; i++) { v0[i] = p2[i] - p1[i]; v1[i] = p0[i] - p1[i]; } cross(v0, v1, normal); normVec(normal); } // __global__ void grav_v(float (*pos)[3],float(*vec)[3],float(*sti),float(*e),float(*T),float(*J), float time, float dt) { double xn,yn,zn,vx,vy,vz,dis,sq; unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ; double v_buff[3]={0}; double coltime[NUM_POINTS][2]={0}; int colnum=0; double gravity=0; xn = pos[index][0]; yn = pos[index][1]; zn = pos[index][2]; vx = vec[index][0]; vy = vec[index][1]; vz = vec[index][2]; v_buff[0]=vx; v_buff[1]=vy; v_buff[2]=vz; for (int i = 0 ; i < NUM_POINTS; i++) { sq = pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2); gravity=GRAV*M/sq*scale*scale; dis = sqrt(sq); // if (dis > 2 * R * colmargin && i != 
index) { // J[index]-=0.5*M*fabs((float)(pow((double)vec[i][0]/scale,2)+pow((double)vec[i][1]/scale,2)+pow((double)vec[i][2]/scale,2))-(float)(pow((double)(vec[i][0]/scale +((pos[i][0]-xn)/dis)*gravity*ANIM),2)+pow((double)(vec[i][1]/scale + ((pos[i][1]-yn)/dis)*gravity*ANIM),2)+pow((double)(vec[i][2]/scale + ((pos[i][2]-zn)/dis)*gravity*ANIM),2))); // vx = vx + ((pos[i][0]-xn)/dis)*gravity*ANIM*scale; vy = vy + ((pos[i][1]-yn)/dis)*gravity*ANIM*scale; vz = vz + ((pos[i][2]-zn)/dis)*gravity*ANIM*scale; } else if (i != index) { //TBD coltime[i][1]=i; coltime[i][0]=(2*R*colmargin - dis)/(pow((double)(vx-vec[i][0]),2)+pow((double)(vy-vec[i][1]),2)+pow((double)(vz-vec[i][2]),2)); colnum++; } } __syncthreads(); if(colnum>0) { // double tmp[2]={0}; for (int i = 0 ; i < NUM_POINTS; i++){ for(int j = i+1; j < NUM_POINTS; j++){ if(coltime[i][0] > coltime[j][0]){ tmp[0]=coltime[i][0]; tmp[1]=coltime[i][1]; coltime[i][0]=coltime[j][0]; coltime[i][1]=coltime[j][1]; coltime[j][0]=tmp[0]; coltime[j][1]=tmp[1]; } } } // for (int i=NUM_POINTS-1 ; i>=NUM_POINTS-colnum; i--){ int colindex=coltime[i][1]; float repul=0; if (colindex != index) { repul=e[index]; // if (e[colindex] < e[index]) { repul=e[colindex]; } // v_buff[0]=(double)((1+repul)*M*vec[colindex][0]+(M-repul*M)*v_buff[0])/(M+M); v_buff[1]=(double)((1+repul)*M*vec[colindex][1]+(M-repul*M)*v_buff[1])/(M+M); v_buff[2]=(double)((1+repul)*M*vec[colindex][2]+(M-repul*M)*v_buff[2])/(M+M); //sti double Energy=0.5*(1-repul*repul)*(M*(pow((double)vx/scale,2)+pow((double)vy/scale,2)+pow((double)vz/scale,2)) + M*(pow((double)vec[colindex][0]/scale,2)+pow((double)vec[colindex][1]/scale,2)+pow((double)vec[colindex][2]/scale,2))); //J[index]+=Energy * pow(10.0,(double)sti[index]) / pow(10.0,(double)sti[index]) + pow(10.0,(double)sti[colindex]); J[index]+=Energy * (double)sti[index] / ((double)sti[index] + (double)sti[colindex]); T[index]=(J[index]-0.5*M*(pow((double)v_buff[0],2)+pow((double)v_buff[1],2)+pow((double)v_buff[2],2)))/M/cap; // //e[index] = e[index] * (visc+(temp/100)-(T[index]/100)-0.5*log(M))/(visc-0.5*log(M)); e[index] = e[index] * (visc+(temp/100)-(T[index]/100))/(visc); sti[index] = visc - ((T[index] - temp) / 100); } } } __syncthreads(); if (colnum>0) { vec[index][0] = (float)v_buff[0]; vec[index][1] = (float)v_buff[1]; vec[index][2] = (float)v_buff[2]; } else { vec[index][0] = (float)vx; vec[index][1] = (float)vy; vec[index][2] = (float)vz; } // T[index]=(J[index]-0.5*M*(pow((double)vec[index][0]/scale,2)+pow((double)vec[index][1]/scale,2)+pow((double)vec[index][2]/scale,2)))/M/cap; // //e[index] = e[index] * (visc+(temp/100)-(T[index]/100)-0.5*log(M))/(visc-0.5*log(M)); e[index] = e[index] * (visc+(temp/100)-(T[index]/100))/(visc); sti[index] = visc - ((T[index] - temp) / 100); } // __global__ void grav_p(float (*pos)[3], float(*vec)[3] , float time, float dt) { double xn,yn,zn,vx,vy,vz; unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = ( blockDim.x * (Grid_x - 1) + blockDim.x ) * ( blockDim.y * (Grid_y - 1) + blockDim.y ) * thread_idz + ( blockDim.x * (Grid_x - 1) + blockDim.x ) * thread_idy + thread_idx ; xn = pos[index][0]; yn = pos[index][1]; zn = pos[index][2]; vx = vec[index][0]; vy = vec[index][1]; vz = vec[index][2]; pos[index][0] = xn + vx * dt; pos[index][1] = yn + vy * dt; pos[index][2] = zn + vz * dt; } // void setInitialPosition(void) { for (int i = 0; i < NUM_POINTS; i++) { 
h_point[i][0] = ((double)rand()-(double)rand()) / RAND_MAX * INIT_WIDTH/30 ; h_point[i][1] = ((double)rand()-(double)rand()) / RAND_MAX * INIT_WIDTH/30 ; h_point[i][2] = ((double)rand()-(double)rand()) / RAND_MAX * INIT_WIDTH/30 ; } for (int i = 0; i < NUM_POINTS; i++) { v_point[i][0] = 0; v_point[i][1] = 0; v_point[i][2] = 0; st_point[i]=visc; e_point[i]=ref; T_point[i]=temp; J_point[i]=cap*M*temp; } checkCudaErrors(hipMalloc((void**)&d_point, 3 * NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&dv_point, 3 * NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&dst_point, NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&de_point, NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&dT_point, NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&dJ_point, NUM_POINTS * sizeof(float))); checkCudaErrors(hipMemcpy(d_point, h_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dv_point, v_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dst_point, st_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(de_point, e_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dT_point, T_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dJ_point, J_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); } //CUDA void launchGPUKernel(unsigned int num_particles, float (*pos)[3], float (*vec)[3] ,float(*sti),float(*e),float(*T),float(*J), float time, float dt) { dim3 grid(Grid_x,Grid_y,Grid_z); dim3 block(Block_x,Block_y,Block_z); hipLaunchKernelGGL(( grav_v), dim3(grid) , dim3(block), 0, 0, pos, vec, sti, e, T, J, time, dt); hipLaunchKernelGGL(( grav_p), dim3(grid) , dim3(block), 0, 0, pos, vec, time, dt); } // void runGPUKernel(void) { launchGPUKernel(NUM_POINTS, d_point, dv_point ,dst_point, de_point,dT_point,dJ_point, anim_time, anim_dt); checkCudaErrors(hipMemcpy(h_point, d_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(v_point, dv_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(st_point, dst_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(e_point, de_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(T_point, dT_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(J_point, dJ_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); anim_time += anim_dt; } // void defineViewMatrix(double phi, double theta) { unsigned int i; double c, s, xy_dist; double x_axis[3], y_axis[3], z_axis[3]; // eye[Z] = sin(theta * PI / 180.0); xy_dist = cos(theta * PI / 180.0); c = cos(phi * PI / 180.0); s = sin(phi * PI / 180.0); eye[X] = xy_dist * c; eye[Y] = xy_dist * s; up[X] = - c * eye[Z]; up[Y] = - s * eye[Z]; up[Z] = s * eye[Y] + c * eye[X]; normVec(up); // for (i = 0; i < 3; i++) { z_axis[i] = eye[i] - center[i]; } normVec(z_axis); cross(up, z_axis, x_axis); normVec(x_axis); cross(z_axis, x_axis, y_axis); gluLookAt(eye[X], eye[Y], eye[Z], center[X], center[Y], center[Z], up[X], up[Y], up[Z]); } void display(void) { double nrml_vec[3]; light_pos[0] = (float)eye[X]; light_pos[1] = (float)eye[Y]; light_pos[2] = (float)eye[Z]; light_pos[3] = 0.0f; //CUDA runGPUKernel(); // glLightfv(GL_LIGHT0, GL_POSITION, light_pos); //glEnable(GL_LIGHTING); glMatrixMode(GL_PROJECTION); 
//glFrustum(-1000000, 1000000, -1000000, 1000000, -1000000, 1000000); glLoadIdentity(); glOrtho(-vision_size-right_motion/2, vision_size+right_motion/2, -vision_size-right_motion/2, vision_size+right_motion/2, -100*vision_size, 100*vision_size); glViewport(0, 0, window_width, window_height); defineViewMatrix(phi, theta); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glBegin(GL_QUADS); // TBD for (int k = 0 ; k < NUM_POINTS ; k++) { // if(T_point[k]>10000){ glColor3f(0.75f,1.0f,1.0f); } else if(T_point[k]>8000){ glColor3f((GLfloat)(1.0-0.25/2000*(T_point[k]-8000)),1.0f,1.0f); } else if(T_point[k]>6000){ glColor3f(1.0f,(GLfloat)(0.25+0.25/2000*(T_point[k]-6000)),(GLfloat)(1.0/2000*(T_point[k]-6000))); } else if(T_point[k]>2000){ glColor3f(1.0f,(GLfloat)(0.25+0.25/2000*(T_point[k]-2000)),0.0f); } else if(T_point[k]>0){ glColor3f((GLfloat)(0.25/2000*T_point[k]),0.25f,0.0f); } for (int i = 0 ; i < dev + 1 ; i ++) { for (int j = 0 ; j < 2 * dev + 1 ; j++) { normal(point[i * (dev-1) + j],point[(i + 1) * (dev-1) + j + 1],point[(i+1) * (dev-1) + j],nrml_vec); glNormal3dv(nrml_vec); glVertex3d(point[i * (dev-1) + j][X] + h_point[k][X], point[i * (dev-1) + j][Y] + h_point[k][Y], point[i * (dev-1) + j][Z] + h_point[k][Z]); glVertex3d(point[(i + 1) * (dev-1) + j][X] + h_point[k][X],point[(i + 1) * (dev-1) + j][Y] + h_point[k][Y],point[(i + 1) * (dev-1) + j][Z] + h_point[k][Z]); glVertex3d(point[(i + 1) * (dev-1) + j + 1][X] + h_point[k][X], point[(i + 1) * (dev-1) + j + 1][Y] + h_point[k][Y], point[(i + 1) * (dev-1) + j + 1][Z] + h_point[k][Z]); glVertex3d(point[i * (dev-1) + j + 1][X] + h_point[k][X],point[i * (dev-1) + j + 1][Y] + h_point[k][Y],point[i * (dev-1) + j + 1][Z] + h_point[k][Z]); } } } glEnd(); glutSwapBuffers(); glutPostRedisplay(); } void mouse_button(int button, int state, int x, int y) { if ((state == GLUT_DOWN) && (button == GLUT_LEFT_BUTTON)) motion_p = true; if ((state == GLUT_DOWN) && (button == GLUT_RIGHT_BUTTON)) motion_w = true; else if (state == GLUT_UP) { motion_p = false; motion_w = false; } mouse_old_x = x; mouse_old_y = y; } void mouse_motion(int x, int y) { int dx, dy; dx = x - mouse_old_x; dy = y - mouse_old_y; if (motion_p) { phi -= dx * 0.2; theta += dy * 0.2; } if (motion_w) { right_motion += dx ; up_motion -= dy ; } mouse_old_x = x; mouse_old_y = y; glutPostRedisplay(); } void resize(int width, int height) { //unsigned int old_width, old_height; //old_width = window_width; //old_height = window_height; window_width = width; window_height = height; //vision_size=vision_size*(double)window_width/((double)window_width+right_motion)*(double)window_height/((double)window_height+up_motion); } void keyboard(unsigned char key, int x, int y) { switch (key) { case 'q': case 'Q': case '\033': exit(0); default: break; } } bool initGL(void) { glClearColor(0.0f, 0.0f , 0.0f, 0.5f); glEnable(GL_DEPTH_TEST); glClearDepth(1.0); glDepthFunc(GL_LESS); glEnable(GL_LIGHT0); return true; } int main(int argc, char** argv) { double yangle,zangle; double r; point = (double **)malloc(sizeof(double *) * num_points); for (int i = 0 ; i < num_points ; i++) { point[i] = (double *)malloc(sizeof(double) * 3); } for (int i = 0 ; i < dev + 1; i ++) { zangle = i * PI / dev; r=R * sin(zangle); for (int j = 0 ; j < dev + 1; j++) { yangle=j * PI * 2 / dev; point[i * dev + j][X] = r * sin(yangle); point[i * dev + j][Y] = r * cos(yangle); point[i * dev + j][Z] = R * cos(zangle); } } glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH); glutInitWindowSize(window_width, 
window_height); glutCreateWindow("3D CUDA Simulation"); glutDisplayFunc(display); glutReshapeFunc(resize); glutKeyboardFunc(keyboard); glutMouseFunc(mouse_button); glutMotionFunc(mouse_motion); setInitialPosition(); if (!initGL()) return 1; glutMainLoop(); hipFree(d_point); hipFree(dv_point); hipFree(dst_point); hipFree(de_point); hipFree(dT_point); hipFree(dJ_point); hipDeviceReset(); for (int i = 0 ; i < num_points ; i++) { free (point[i]); } free (point); return 0; }
ac7f4d4b8505038fca0eaa3c485709429278469e.cu
#include <stdio.h> #include <stdlib.h> #include <GL/gl.h> #include <GL/glut.h> #include <math.h> #include <stdbool.h> #include <omp.h> #include <cuda.h> #include <helper_cuda.h> #include <helper_functions.h> #include <cuda_runtime.h> #define PI 3.141592653589793 #define cap 1000 #define ref 0.4 #define temp 4273 #define visc 9 #define GRAV (6.674*0.00000000000000000001) #define density (2.5 * 1000000000000) #define rad 20 #define dev 12 #define M (4 / 3 * PI * rad*rad*rad* density) #define X 0 #define Y 1 #define Z 2 #define ANIM 1000000 #define scale 0.01 #define colmargin 1.05 #define R (rad * scale) #define INIT_WIDTH 800 #define INIT_HEIGHT 800 #define vision 40 #define Grid_x 1 //block間は同期できない #define Grid_y 1 #define Grid_z 1 #define Block_x 64 #define Block_y 8 #define Block_z 1 #define NUM_POINTS (Grid_x*Grid_y*Grid_z*Block_x*Block_y*Block_z) unsigned int num_points = (dev + 1) * (dev + 1); unsigned int window_width = INIT_WIDTH; unsigned int window_height = INIT_HEIGHT; double vision_size = vision; float right_motion=0; float up_motion=0; double left, right, bottom, top; float h_point[NUM_POINTS][3]; float v_point[NUM_POINTS][3]; float st_point[NUM_POINTS]; float e_point[NUM_POINTS]; float T_point[NUM_POINTS]; float J_point[NUM_POINTS]; float anim_time = ANIM; float anim_dt = 0.1; double phi = 30.0; double theta = 30.0; float light_pos[4]; int mouse_old_x, mouse_old_y; bool motion_p; bool motion_w; double eye[3]; double center[3] = {0.0, 0.0, 0.0}; double up[3]; double ** point; float (*d_point)[3]; float (*dv_point)[3]; float (*dst_point); float (*de_point); float (*dT_point); float (*dJ_point); __global__ void grav_v(float (*pos)[3], float(*vec)[3] ,float(*sti),float(*e),float(*T),float(*J), float time, float dt); __global__ void grav_p(float (*pos)[3], float(*vec)[3] , float time, float dt); //基本関数群 double dot(double vec0[], double vec1[]) { return(vec0[X] * vec1[X] + vec0[Y] * vec1[Y] + vec0[Z] * vec1[Z]); } void cross(double vec0[], double vec1[], double vec2[]) { vec2[X] = vec0[Y] * vec1[Z] - vec0[Z] * vec1[Y]; vec2[Y] = vec0[Z] * vec1[X] - vec0[X] * vec1[Z]; vec2[Z] = vec0[X] * vec1[Y] - vec0[Y] * vec1[X]; } void normVec(double vec[]) { double norm; norm = sqrt(vec[X] * vec[X] + vec[Y] * vec[Y] + vec[Z] * vec[Z]); vec[X] /= norm; vec[Y] /= norm; vec[Z] /= norm; } void normal(double p0[], double p1[], double p2[], double normal[]) { unsigned int i; double v0[3], v1[3]; for (i = 0; i < 3; i++) { v0[i] = p2[i] - p1[i]; v1[i] = p0[i] - p1[i]; } cross(v0, v1, normal); normVec(normal); } //重力影響後の速度を決定 __global__ void grav_v(float (*pos)[3],float(*vec)[3],float(*sti),float(*e),float(*T),float(*J), float time, float dt) { double xn,yn,zn,vx,vy,vz,dis,sq; unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ; double v_buff[3]={0}; double coltime[NUM_POINTS][2]={0}; int colnum=0; double gravity=0; xn = pos[index][0]; yn = pos[index][1]; zn = pos[index][2]; vx = vec[index][0]; vy = vec[index][1]; vz = vec[index][2]; v_buff[0]=vx; v_buff[1]=vy; v_buff[2]=vz; for (int i = 0 ; i < NUM_POINTS; i++) { sq = pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2); gravity=GRAV*M/sq*scale*scale; dis = sqrt(sq); //衝突域侵入判定 if (dis > 2 * R * colmargin && i != index) { //他粒子へ与える運動エネルギーと内部エネルギーの交換 
            J[index]-=0.5*M*fabs((float)(pow((double)vec[i][0]/scale,2)+pow((double)vec[i][1]/scale,2)+pow((double)vec[i][2]/scale,2))-(float)(pow((double)(vec[i][0]/scale +((pos[i][0]-xn)/dis)*gravity*ANIM),2)+pow((double)(vec[i][1]/scale + ((pos[i][1]-yn)/dis)*gravity*ANIM),2)+pow((double)(vec[i][2]/scale + ((pos[i][2]-zn)/dis)*gravity*ANIM),2)));
            //velocity update
            vx = vx + ((pos[i][0]-xn)/dis)*gravity*ANIM*scale;
            vy = vy + ((pos[i][1]-yn)/dis)*gravity*ANIM*scale;
            vz = vz + ((pos[i][2]-zn)/dis)*gravity*ANIM*scale;
        }
        else if (i != index) {
            //record the elapsed time since entering the collision zone  TBD: resolve along the normal direction
            coltime[i][1]=i;
            coltime[i][0]=(2*R*colmargin - dis)/(pow((double)(vx-vec[i][0]),2)+pow((double)(vy-vec[i][1]),2)+pow((double)(vz-vec[i][2]),2));
            colnum++;
        }
    }
    __syncthreads();
    if(colnum>0) {
        //indexed sort by elapsed time since entering the collision zone
        double tmp[2]={0};
        for (int i = 0 ; i < NUM_POINTS; i++){
            for(int j = i+1; j < NUM_POINTS; j++){
                if(coltime[i][0] > coltime[j][0]){
                    tmp[0]=coltime[i][0];
                    tmp[1]=coltime[i][1];
                    coltime[i][0]=coltime[j][0];
                    coltime[i][1]=coltime[j][1];
                    coltime[j][0]=tmp[0];
                    coltime[j][1]=tmp[1];
                }
            }
        }
        //process collisions starting from the longest elapsed time
        for (int i=NUM_POINTS-1 ; i>=NUM_POINTS-colnum; i--){
            int colindex=coltime[i][1];
            float repul=0;
            if (colindex != index) {
                repul=e[index];
                //the smaller restitution coefficient takes priority
                if (e[colindex] < e[index]) {
                    repul=e[colindex];
                }
                //velocity update
                v_buff[0]=(double)((1+repul)*M*vec[colindex][0]+(M-repul*M)*v_buff[0])/(M+M);
                v_buff[1]=(double)((1+repul)*M*vec[colindex][1]+(M-repul*M)*v_buff[1])/(M+M);
                v_buff[2]=(double)((1+repul)*M*vec[colindex][2]+(M-repul*M)*v_buff[2])/(M+M);
                //distribute the collision energy by the ratio of sti and convert it to thermal energy
                double Energy=0.5*(1-repul*repul)*(M*(pow((double)vx/scale,2)+pow((double)vy/scale,2)+pow((double)vz/scale,2)) + M*(pow((double)vec[colindex][0]/scale,2)+pow((double)vec[colindex][1]/scale,2)+pow((double)vec[colindex][2]/scale,2)));
                //J[index]+=Energy * pow(10.0,(double)sti[index]) / pow(10.0,(double)sti[index]) + pow(10.0,(double)sti[colindex]);
                J[index]+=Energy * (double)sti[index] / ((double)sti[index] + (double)sti[colindex]);
                T[index]=(J[index]-0.5*M*(pow((double)v_buff[0],2)+pow((double)v_buff[1],2)+pow((double)v_buff[2],2)))/M/cap;
                //update viscosity and restitution coefficient
                //e[index] = e[index] * (visc+(temp/100)-(T[index]/100)-0.5*log(M))/(visc-0.5*log(M));
                e[index] = e[index] * (visc+(temp/100)-(T[index]/100))/(visc);
                sti[index] = visc - ((T[index] - temp) / 100);
            }
        }
    }
    __syncthreads();
    if (colnum>0) {
        vec[index][0] = (float)v_buff[0];
        vec[index][1] = (float)v_buff[1];
        vec[index][2] = (float)v_buff[2];
    }
    else {
        vec[index][0] = (float)vx;
        vec[index][1] = (float)vy;
        vec[index][2] = (float)vz;
    }
    //update thermal energy from internal and kinetic energy
    T[index]=(J[index]-0.5*M*(pow((double)vec[index][0]/scale,2)+pow((double)vec[index][1]/scale,2)+pow((double)vec[index][2]/scale,2)))/M/cap;
    //update viscosity and restitution coefficient
    //e[index] = e[index] * (visc+(temp/100)-(T[index]/100)-0.5*log(M))/(visc-0.5*log(M));
    e[index] = e[index] * (visc+(temp/100)-(T[index]/100))/(visc);
    sti[index] = visc - ((T[index] - temp) / 100);
}

//determine positions after the gravity update
__global__ void grav_p(float (*pos)[3], float(*vec)[3] , float time, float dt) {
    double xn,yn,zn,vx,vy,vz;
    unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
    unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
    unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
    unsigned int index = ( blockDim.x * (Grid_x - 1) + blockDim.x ) * ( blockDim.y * (Grid_y - 1) + blockDim.y ) * thread_idz + ( blockDim.x * (Grid_x - 1) + blockDim.x ) * thread_idy + thread_idx ;
    xn = pos[index][0];
    yn = pos[index][1];
    zn = pos[index][2];
    vx = vec[index][0];
    vy = vec[index][1];
    vz = vec[index][2];
    pos[index][0] = xn + vx * dt;
    pos[index][1] = yn + vy * dt;
    pos[index][2] = zn + vz * dt;
}

// Place the particles at their initial positions.
void setInitialPosition(void) {
    for (int i = 0; i < NUM_POINTS; i++) {
        h_point[i][0] = ((double)rand()-(double)rand()) / RAND_MAX * INIT_WIDTH/30 ;
        h_point[i][1] = ((double)rand()-(double)rand()) / RAND_MAX * INIT_WIDTH/30 ;
        h_point[i][2] = ((double)rand()-(double)rand()) / RAND_MAX * INIT_WIDTH/30 ;
    }
    for (int i = 0; i < NUM_POINTS; i++) {
        v_point[i][0] = 0;
        v_point[i][1] = 0;
        v_point[i][2] = 0;
        st_point[i]=visc;
        e_point[i]=ref;
        T_point[i]=temp;
        J_point[i]=cap*M*temp;
    }
    checkCudaErrors(cudaMalloc((void**)&d_point, 3 * NUM_POINTS * sizeof(float)));
    checkCudaErrors(cudaMalloc((void**)&dv_point, 3 * NUM_POINTS * sizeof(float)));
    checkCudaErrors(cudaMalloc((void**)&dst_point, NUM_POINTS * sizeof(float)));
    checkCudaErrors(cudaMalloc((void**)&de_point, NUM_POINTS * sizeof(float)));
    checkCudaErrors(cudaMalloc((void**)&dT_point, NUM_POINTS * sizeof(float)));
    checkCudaErrors(cudaMalloc((void**)&dJ_point, NUM_POINTS * sizeof(float)));
    checkCudaErrors(cudaMemcpy(d_point, h_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(dv_point, v_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(dst_point, st_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(de_point, e_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(dT_point, T_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(dJ_point, J_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
}

//CUDA launch function
void launchGPUKernel(unsigned int num_particles, float (*pos)[3], float (*vec)[3] ,float(*sti),float(*e),float(*T),float(*J), float time, float dt) {
    dim3 grid(Grid_x,Grid_y,Grid_z);
    dim3 block(Block_x,Block_y,Block_z);
    grav_v<<<grid , block>>>(pos, vec, sti, e, T, J, time, dt);
    grav_p<<<grid , block>>>(pos, vec, time, dt);
}

//animation step
void runGPUKernel(void) {
    launchGPUKernel(NUM_POINTS, d_point, dv_point ,dst_point, de_point,dT_point,dJ_point, anim_time, anim_dt);
    checkCudaErrors(cudaMemcpy(h_point, d_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaMemcpy(v_point, dv_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaMemcpy(st_point, dst_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaMemcpy(e_point, de_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaMemcpy(T_point, dT_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaMemcpy(J_point, dJ_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
    anim_time += anim_dt;
}

//view definition
void defineViewMatrix(double phi, double theta) {
    unsigned int i;
    double c, s, xy_dist;
    double x_axis[3], y_axis[3], z_axis[3];
    // Set the viewpoint.
    eye[Z] = sin(theta * PI / 180.0);
    xy_dist = cos(theta * PI / 180.0);
    c = cos(phi * PI / 180.0);
    s = sin(phi * PI / 180.0);
    eye[X] = xy_dist * c;
    eye[Y] = xy_dist * s;
    up[X] = - c * eye[Z];
    up[Y] = - s * eye[Z];
    up[Z] = s * eye[Y] + c * eye[X];
    normVec(up);
    // Define a coordinate system with the viewpoint at the origin.
    for (i = 0; i < 3; i++) {
        z_axis[i] = eye[i] - center[i];
    }
    normVec(z_axis);
    cross(up, z_axis, x_axis);
    normVec(x_axis);
    cross(z_axis, x_axis, y_axis);
    gluLookAt(eye[X], eye[Y], eye[Z], center[X], center[Y], center[Z], up[X], up[Y], up[Z]);
}

void display(void) {
    double nrml_vec[3];
    light_pos[0] = (float)eye[X];
    light_pos[1] = (float)eye[Y];
    light_pos[2] = (float)eye[Z];
    light_pos[3] = 0.0f;
    //start CUDA computation
    runGPUKernel();
    // set up the light source
    glLightfv(GL_LIGHT0, GL_POSITION, light_pos);
    //glEnable(GL_LIGHTING);
    glMatrixMode(GL_PROJECTION);
    //glFrustum(-1000000, 1000000, -1000000, 1000000, -1000000, 1000000);
    glLoadIdentity();
    glOrtho(-vision_size-right_motion/2, vision_size+right_motion/2, -vision_size-right_motion/2, vision_size+right_motion/2, -100*vision_size, 100*vision_size);
    glViewport(0, 0, window_width, window_height);
    defineViewMatrix(phi, theta);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glBegin(GL_QUADS);
    //build each sphere from polygons  TBD: metaballs
    for (int k = 0 ; k < NUM_POINTS ; k++) {
        //vary the color with temperature
        if(T_point[k]>10000){
            glColor3f(0.75f,1.0f,1.0f);
        }
        else if(T_point[k]>8000){
            glColor3f((GLfloat)(1.0-0.25/2000*(T_point[k]-8000)),1.0f,1.0f);
        }
        else if(T_point[k]>6000){
            glColor3f(1.0f,(GLfloat)(0.25+0.25/2000*(T_point[k]-6000)),(GLfloat)(1.0/2000*(T_point[k]-6000)));
        }
        else if(T_point[k]>2000){
            glColor3f(1.0f,(GLfloat)(0.25+0.25/2000*(T_point[k]-2000)),0.0f);
        }
        else if(T_point[k]>0){
            glColor3f((GLfloat)(0.25/2000*T_point[k]),0.25f,0.0f);
        }
        for (int i = 0 ; i < dev + 1 ; i ++) {
            for (int j = 0 ; j < 2 * dev + 1 ; j++) {
                normal(point[i * (dev-1) + j],point[(i + 1) * (dev-1) + j + 1],point[(i+1) * (dev-1) + j],nrml_vec);
                glNormal3dv(nrml_vec);
                glVertex3d(point[i * (dev-1) + j][X] + h_point[k][X], point[i * (dev-1) + j][Y] + h_point[k][Y], point[i * (dev-1) + j][Z] + h_point[k][Z]);
                glVertex3d(point[(i + 1) * (dev-1) + j][X] + h_point[k][X],point[(i + 1) * (dev-1) + j][Y] + h_point[k][Y],point[(i + 1) * (dev-1) + j][Z] + h_point[k][Z]);
                glVertex3d(point[(i + 1) * (dev-1) + j + 1][X] + h_point[k][X], point[(i + 1) * (dev-1) + j + 1][Y] + h_point[k][Y], point[(i + 1) * (dev-1) + j + 1][Z] + h_point[k][Z]);
                glVertex3d(point[i * (dev-1) + j + 1][X] + h_point[k][X],point[i * (dev-1) + j + 1][Y] + h_point[k][Y],point[i * (dev-1) + j + 1][Z] + h_point[k][Z]);
            }
        }
    }
    glEnd();
    glutSwapBuffers();
    glutPostRedisplay();
}

void mouse_button(int button, int state, int x, int y) {
    if ((state == GLUT_DOWN) && (button == GLUT_LEFT_BUTTON)) motion_p = true;
    if ((state == GLUT_DOWN) && (button == GLUT_RIGHT_BUTTON)) motion_w = true;
    else if (state == GLUT_UP) {
        motion_p = false;
        motion_w = false;
    }
    mouse_old_x = x;
    mouse_old_y = y;
}

void mouse_motion(int x, int y) {
    int dx, dy;
    dx = x - mouse_old_x;
    dy = y - mouse_old_y;
    if (motion_p) {
        phi -= dx * 0.2;
        theta += dy * 0.2;
    }
    if (motion_w) {
        right_motion += dx ;
        up_motion -= dy ;
    }
    mouse_old_x = x;
    mouse_old_y = y;
    glutPostRedisplay();
}

void resize(int width, int height) {
    //unsigned int old_width, old_height;
    //old_width = window_width;
    //old_height = window_height;
    window_width = width;
    window_height = height;
    //vision_size=vision_size*(double)window_width/((double)window_width+right_motion)*(double)window_height/((double)window_height+up_motion);
}

void keyboard(unsigned char key, int x, int y) {
    switch (key) {
    case 'q':
    case 'Q':
    case '\033':
        exit(0);
    default:
        break;
    }
}

bool initGL(void) {
    glClearColor(0.0f, 0.0f , 0.0f, 0.5f);
    glEnable(GL_DEPTH_TEST);
    glClearDepth(1.0);
    glDepthFunc(GL_LESS);
    glEnable(GL_LIGHT0);
    return true;
}

int main(int argc, char** argv) {
    double yangle,zangle;
    double r;
    point = (double **)malloc(sizeof(double *) * num_points);
    for (int i = 0 ; i < num_points ; i++) {
        point[i] = (double *)malloc(sizeof(double) * 3);
    }
    for (int i = 0 ; i < dev + 1; i ++) {
        zangle = i * PI / dev;
        r=R * sin(zangle);
        for (int j = 0 ; j < dev + 1; j++) {
            yangle=j * PI * 2 / dev;
            point[i * dev + j][X] = r * sin(yangle);
            point[i * dev + j][Y] = r * cos(yangle);
            point[i * dev + j][Z] = R * cos(zangle);
        }
    }
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);
    glutInitWindowSize(window_width, window_height);
    glutCreateWindow("3D CUDA Simulation");
    glutDisplayFunc(display);
    glutReshapeFunc(resize);
    glutKeyboardFunc(keyboard);
    glutMouseFunc(mouse_button);
    glutMotionFunc(mouse_motion);
    setInitialPosition();
    if (!initGL()) return 1;
    glutMainLoop();
    cudaFree(d_point);
    cudaFree(dv_point);
    cudaFree(dst_point);
    cudaFree(de_point);
    cudaFree(dT_point);
    cudaFree(dJ_point);
    cudaDeviceReset();
    for (int i = 0 ; i < num_points ; i++) {
        free (point[i]);
    }
    free (point);
    return 0;
}
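The collision branch above applies the standard one-dimensional restitution update, specialised to two bodies of equal mass M. The sketch below restates that formula in isolation; the helper name and its standalone form are illustrative assumptions, not part of the original file.

// Hypothetical helper restating the velocity update used in the collision loop:
// v1' = ((1 + e) * M * v2 + (M - e * M) * v1) / (M + M)
// For e = 1 this is a perfectly elastic exchange; for e = 0 the two velocities merge.
static double post_collision_velocity(double v1, double v2, double mass, double e)
{
    return ((1.0 + e) * mass * v2 + (mass - e * mass) * v1) / (mass + mass);
}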
b4cbb8d41df7eb4c5f76f8fd9218c4d5a69652e2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// filename: absErr.cu
// a simple CUDA kernel to compute the element-wise absolute error between two matrices
extern "C" // ensure function name to be exactly "absErr"
{
}
__global__ void absErr(int N, int M, float *A, float *Y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int index = j*N + i;
    if (i < N && j < M)
    {
        A[index] = fabsf(__fsub_rn(A[index], Y[index])); // A[index] = abs(A[index]-Y[index])
    }
}
b4cbb8d41df7eb4c5f76f8fd9218c4d5a69652e2.cu
#include "includes.h" // filename: eeTanh.cu // a simple CUDA kernel to square the elements of a matrix extern "C" // ensure function name to be exactly "eeTanh" { } __global__ void absErr(int N, int M, float *A, float *Y) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = j*N + i; if (i < N && j < M) { A[index] = fabsf(__fsub_rn(A[index], Y[index])); // A[index] = abs(A[index]-Y[index]) } }
7f12741d73d1f2a3b7c195ac999250a2dddd908c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// This is my device function
// __global__ means this function is a kernel: it runs on the device and can be launched from the host
__global__ void kernelHelloWorld()
{
    printf("Hello World!\n");
}
7f12741d73d1f2a3b7c195ac999250a2dddd908c.cu
#include "includes.h" // This is my deviece function // __global__ means this function is visible to the host __global__ void kernelHelloWorld() { printf("Hello World!\n"); }
87d59bcbb82b5ae66954c799981acceaa0b9f77e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "RandomGenerator.cuh"
#include "global_function.cuh"
#include "check_cuda.h"
#include <ctime>

hiprandGenerator_t RandomGenerator::gen;

void RandomGenerator::initCudaRandGenerator() {
    hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
    hiprandSetPseudoRandomGeneratorSeed(gen, time(NULL));
}

void RandomGenerator::destroyCudaRandGenerator() {
    // destroy the generator before clearing the handle; destroying a null handle would leak it
    hiprandDestroyGenerator(gen);
    gen = nullptr;
}

__global__ void map_float2int(int *d_iData, float const *d_fData, int low_threshold, int high_threshold, size_t size) {
    unsigned int myId = global_func::getThreadId();
    if (myId >= size)
        return;
    d_iData[myId] = int(d_fData[myId] * (high_threshold - low_threshold) + low_threshold);
}

bool RandomGenerator::gpu_Uniform(int *d_min_max_array, int low_threshold, int high_threshold, int array_length) {
    if (d_min_max_array == NULL)
        return false;

    float *d_uniform = NULL;
    checkCudaErrors(hipMalloc((void **) &d_uniform, array_length * sizeof(float)));
    hiprandGenerateUniform(gen, d_uniform, array_length);

    int nThreads;
    dim3 nBlocks;
    if (!global_func::setThreadsBlocks(nBlocks, nThreads, array_length)) {
        // free the scratch buffer on the error path as well
        checkCudaErrors(hipFree(d_uniform));
        return false;
    }

    map_float2int << < nBlocks, nThreads >> > (d_min_max_array, d_uniform, low_threshold, high_threshold, array_length);
    checkCudaErrors(hipDeviceSynchronize());
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipFree(d_uniform));
    return true;
}
87d59bcbb82b5ae66954c799981acceaa0b9f77e.cu
#include "RandomGenerator.cuh" #include "global_function.cuh" #include "check_cuda.h" curandGenerator_t RandomGenerator::gen; void RandomGenerator::initCudaRandGenerator() { curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(gen, time(NULL)); } void RandomGenerator::destroyCudaRandGenerator() { gen = nullptr; curandDestroyGenerator(gen); } __global__ void map_float2int(int *d_iData, float const *d_fData, int low_threshold, int high_threshold, size_t size) { unsigned int myId = global_func::getThreadId(); if (myId >= size) return; d_iData[myId] = int(d_fData[myId] * (high_threshold - low_threshold) + low_threshold); } bool RandomGenerator::gpu_Uniform(int *d_min_max_array, int low_threshold, int high_threshold, int array_length) { if (d_min_max_array == NULL) return false; float *d_uniform = NULL; checkCudaErrors(cudaMalloc((void **) &d_uniform, array_length * sizeof(float))); curandGenerateUniform(gen, d_uniform, array_length); int nThreads; dim3 nBlocks; if (!global_func::setThreadsBlocks(nBlocks, nThreads, array_length)) return false; map_float2int << < nBlocks, nThreads >> > (d_min_max_array, d_uniform, low_threshold, high_threshold, array_length); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaFree(d_uniform)); return true; }
2a28216bc9c5a789a1e70b7c94aa60fd5ae02fcf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <rocblas.h> // clang-format off #include "tensors/gpu/prod.h" #include "tensors/gpu/backend.h" #include "tensors/gpu/cuda_helpers.h" // clang-format on namespace marian { namespace gpu { void Prod(marian::Tensor C, marian::Tensor A, marian::Tensor B, bool transA, bool transB, float beta, float scalar) { hipSetDevice(C->getDevice().no); float alpha = scalar; size_t m = A->shape().elements() / A->shape().back(); size_t k = A->shape().back(); if(transA) std::swap(m, k); size_t l = B->shape().elements() / B->shape().back(); size_t n = B->shape().back(); if(transB) std::swap(l, n); size_t lda = A->shape().back(); size_t ldb = B->shape().back(); size_t ldc = B->shape().back(); if(transB) ldc = B->shape().elements() / B->shape().back(); hipblasOperation_t opA = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N; hipblasOperation_t opB = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N; auto cublasHandle = std::static_pointer_cast<gpu::Backend>(C->getBackend()) ->getCublasHandle(); #if TORCH_HIP_VERSION >= 9000 cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH); #endif hipblasSgemm(cublasHandle, opB, opA, n, m, k, &alpha, B->data(), ldb, A->data(), lda, &beta, C->data(), ldc); #if TORCH_HIP_VERSION >= 9000 cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH); #endif } __global__ void gAddBias(float* out, const float* bias, size_t length, size_t cols) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { size_t index2 = index % cols; out[index] += bias[index2]; } } } void AddBias(marian::Tensor C, const marian::Tensor bias) { hipSetDevice(C->getDevice().no); int length = C->shape().elements(); int cols = bias->shape().elements(); int threads = ::min(MAX_THREADS, length); int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0)); hipLaunchKernelGGL(( gAddBias), dim3(blocks), dim3(threads), 0, 0, C->data(), bias->data(), length, cols); hipStreamSynchronize(0); } void ProdWithBias(marian::Tensor C, const marian::Tensor A, const marian::Tensor B, const marian::Tensor bias, bool transA, bool transB, float beta, float scalar) { marian::gpu::Prod(C, A, B, transA, transB, beta, scalar); marian::gpu::AddBias(C, bias); } // //void ProdBatched2(marian::Tensor C, // const marian::Tensor A, // const marian::Tensor B, // bool transA, // bool transB, // float beta, // float scalar) { // hipSetDevice(C->getDevice().no); // float alpha = scalar; // // size_t batchA = A->shape().elements() / (A->shape()[-1] * A->shape()[-2]); // size_t batchB = B->shape().elements() / (B->shape()[-1] * B->shape()[-2]); // // size_t m = A->shape()[-2]; // size_t k = A->shape()[-1]; // if(transA) // std::swap(m, k); // // size_t l = B->shape()[-2]; // size_t n = B->shape()[-1]; // if(transB) // std::swap(l, n); // // size_t lda = A->shape()[-1]; // size_t ldb = B->shape()[-1]; // size_t ldc = B->shape()[-1]; // // if(transB) // ldc = B->shape()[-2]; // // hipblasOperation_t opA = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N; // hipblasOperation_t opB = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N; // // auto cublasHandle = std::static_pointer_cast<gpu::Backend>(C->getBackend()) // ->getCublasHandle(); // //#if TORCH_HIP_VERSION >= 9000 // cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH); //#endif // hipblasSgemmStridedBatched(cublasHandle, // opB, // opA, // n, // m, // k, // &alpha, // B->data(), // ldb, // batchB == 1 ? 
0 : n * k, // A->data(), // lda, // batchA == 1 ? 0 : m * k, // &beta, // C->data(), // ldc, // n * m, // ::max(batchA, batchB)); //#if TORCH_HIP_VERSION >= 9000 // cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH); //#endif //} void ProdBatched(marian::Tensor C, Ptr<Allocator> allocator, const marian::Tensor A, const marian::Tensor B, bool transA, bool transB, float beta, float scalar) { hipSetDevice(C->getDevice().no); float alpha = scalar; size_t batchA = A->shape().elements() / (A->shape()[-1] * A->shape()[-2]); size_t batchB = B->shape().elements() / (B->shape()[-1] * B->shape()[-2]); size_t m = A->shape()[-2]; size_t k = A->shape()[-1]; if(transA) std::swap(m, k); size_t l = B->shape()[-2]; size_t n = B->shape()[-1]; if(transB) std::swap(l, n); size_t lda = A->shape()[-1]; size_t ldb = B->shape()[-1]; size_t ldc = B->shape()[-1]; if(transB) ldc = B->shape()[-2]; hipblasOperation_t opA = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N; hipblasOperation_t opB = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N; auto cublasHandle = std::static_pointer_cast<gpu::Backend>(C->getBackend()) ->getCublasHandle(); int strideA = batchA == 1 ? 0 : m * k; int strideB = batchB == 1 ? 0 : n * k; int strideC = n * m; int batchC = ::max(batchA, batchB); std::vector<const float*> aptr; std::vector<const float*> bptr; std::vector<float*> cptr; for(int i = 0; i < batchC; i++) { aptr.push_back(A->data() + (i % batchA) * strideA); bptr.push_back(B->data() + (i % batchB) * strideB); cptr.push_back(C->data() + i * strideC); } auto mp_aptr = allocator->alloc<const float*>(aptr.size()); CudaCopy(aptr.data(), aptr.data() + aptr.size(), mp_aptr->data<const float*>()); auto mp_bptr = allocator->alloc<const float*>(bptr.size()); CudaCopy(bptr.data(), bptr.data() + bptr.size(), mp_bptr->data<const float*>()); auto mp_cptr = allocator->alloc<float*>(cptr.size()); CudaCopy(cptr.data(), cptr.data() + cptr.size(), mp_cptr->data<float*>()); #if TORCH_HIP_VERSION >= 9000 cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH); #endif hipblasSgemmBatched(cublasHandle, opB, opA, n, m, k, &alpha, mp_bptr->data<const float*>(), ldb, mp_aptr->data<const float*>(), lda, &beta, mp_cptr->data<float*>(), ldc, batchC); #if TORCH_HIP_VERSION >= 9000 cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH); #endif allocator->free(mp_aptr); allocator->free(mp_bptr); allocator->free(mp_cptr); } } }
2a28216bc9c5a789a1e70b7c94aa60fd5ae02fcf.cu
#include <cublas_v2.h> // clang-format off #include "tensors/gpu/prod.h" #include "tensors/gpu/backend.h" #include "tensors/gpu/cuda_helpers.h" // clang-format on namespace marian { namespace gpu { void Prod(marian::Tensor C, marian::Tensor A, marian::Tensor B, bool transA, bool transB, float beta, float scalar) { cudaSetDevice(C->getDevice().no); float alpha = scalar; size_t m = A->shape().elements() / A->shape().back(); size_t k = A->shape().back(); if(transA) std::swap(m, k); size_t l = B->shape().elements() / B->shape().back(); size_t n = B->shape().back(); if(transB) std::swap(l, n); size_t lda = A->shape().back(); size_t ldb = B->shape().back(); size_t ldc = B->shape().back(); if(transB) ldc = B->shape().elements() / B->shape().back(); cublasOperation_t opA = transA ? CUBLAS_OP_T : CUBLAS_OP_N; cublasOperation_t opB = transB ? CUBLAS_OP_T : CUBLAS_OP_N; auto cublasHandle = std::static_pointer_cast<gpu::Backend>(C->getBackend()) ->getCublasHandle(); #if CUDA_VERSION >= 9000 cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH); #endif cublasSgemm(cublasHandle, opB, opA, n, m, k, &alpha, B->data(), ldb, A->data(), lda, &beta, C->data(), ldc); #if CUDA_VERSION >= 9000 cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH); #endif } __global__ void gAddBias(float* out, const float* bias, size_t length, size_t cols) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { size_t index2 = index % cols; out[index] += bias[index2]; } } } void AddBias(marian::Tensor C, const marian::Tensor bias) { cudaSetDevice(C->getDevice().no); int length = C->shape().elements(); int cols = bias->shape().elements(); int threads = std::min(MAX_THREADS, length); int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0)); gAddBias<<<blocks, threads>>>(C->data(), bias->data(), length, cols); cudaStreamSynchronize(0); } void ProdWithBias(marian::Tensor C, const marian::Tensor A, const marian::Tensor B, const marian::Tensor bias, bool transA, bool transB, float beta, float scalar) { marian::gpu::Prod(C, A, B, transA, transB, beta, scalar); marian::gpu::AddBias(C, bias); } // //void ProdBatched2(marian::Tensor C, // const marian::Tensor A, // const marian::Tensor B, // bool transA, // bool transB, // float beta, // float scalar) { // cudaSetDevice(C->getDevice().no); // float alpha = scalar; // // size_t batchA = A->shape().elements() / (A->shape()[-1] * A->shape()[-2]); // size_t batchB = B->shape().elements() / (B->shape()[-1] * B->shape()[-2]); // // size_t m = A->shape()[-2]; // size_t k = A->shape()[-1]; // if(transA) // std::swap(m, k); // // size_t l = B->shape()[-2]; // size_t n = B->shape()[-1]; // if(transB) // std::swap(l, n); // // size_t lda = A->shape()[-1]; // size_t ldb = B->shape()[-1]; // size_t ldc = B->shape()[-1]; // // if(transB) // ldc = B->shape()[-2]; // // cublasOperation_t opA = transA ? CUBLAS_OP_T : CUBLAS_OP_N; // cublasOperation_t opB = transB ? CUBLAS_OP_T : CUBLAS_OP_N; // // auto cublasHandle = std::static_pointer_cast<gpu::Backend>(C->getBackend()) // ->getCublasHandle(); // //#if CUDA_VERSION >= 9000 // cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH); //#endif // cublasSgemmStridedBatched(cublasHandle, // opB, // opA, // n, // m, // k, // &alpha, // B->data(), // ldb, // batchB == 1 ? 0 : n * k, // A->data(), // lda, // batchA == 1 ? 
0 : m * k, // &beta, // C->data(), // ldc, // n * m, // std::max(batchA, batchB)); //#if CUDA_VERSION >= 9000 // cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH); //#endif //} void ProdBatched(marian::Tensor C, Ptr<Allocator> allocator, const marian::Tensor A, const marian::Tensor B, bool transA, bool transB, float beta, float scalar) { cudaSetDevice(C->getDevice().no); float alpha = scalar; size_t batchA = A->shape().elements() / (A->shape()[-1] * A->shape()[-2]); size_t batchB = B->shape().elements() / (B->shape()[-1] * B->shape()[-2]); size_t m = A->shape()[-2]; size_t k = A->shape()[-1]; if(transA) std::swap(m, k); size_t l = B->shape()[-2]; size_t n = B->shape()[-1]; if(transB) std::swap(l, n); size_t lda = A->shape()[-1]; size_t ldb = B->shape()[-1]; size_t ldc = B->shape()[-1]; if(transB) ldc = B->shape()[-2]; cublasOperation_t opA = transA ? CUBLAS_OP_T : CUBLAS_OP_N; cublasOperation_t opB = transB ? CUBLAS_OP_T : CUBLAS_OP_N; auto cublasHandle = std::static_pointer_cast<gpu::Backend>(C->getBackend()) ->getCublasHandle(); int strideA = batchA == 1 ? 0 : m * k; int strideB = batchB == 1 ? 0 : n * k; int strideC = n * m; int batchC = std::max(batchA, batchB); std::vector<const float*> aptr; std::vector<const float*> bptr; std::vector<float*> cptr; for(int i = 0; i < batchC; i++) { aptr.push_back(A->data() + (i % batchA) * strideA); bptr.push_back(B->data() + (i % batchB) * strideB); cptr.push_back(C->data() + i * strideC); } auto mp_aptr = allocator->alloc<const float*>(aptr.size()); CudaCopy(aptr.data(), aptr.data() + aptr.size(), mp_aptr->data<const float*>()); auto mp_bptr = allocator->alloc<const float*>(bptr.size()); CudaCopy(bptr.data(), bptr.data() + bptr.size(), mp_bptr->data<const float*>()); auto mp_cptr = allocator->alloc<float*>(cptr.size()); CudaCopy(cptr.data(), cptr.data() + cptr.size(), mp_cptr->data<float*>()); #if CUDA_VERSION >= 9000 cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH); #endif cublasSgemmBatched(cublasHandle, opB, opA, n, m, k, &alpha, mp_bptr->data<const float*>(), ldb, mp_aptr->data<const float*>(), lda, &beta, mp_cptr->data<float*>(), ldc, batchC); #if CUDA_VERSION >= 9000 cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH); #endif allocator->free(mp_aptr); allocator->free(mp_bptr); allocator->free(mp_cptr); } } }
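Prod() above multiplies row-major tensors with the column-major cuBLAS API by passing the operands in reverse order and swapping m and n, so the library effectively computes C^T = B^T * A^T. The standalone sketch below shows the same trick for plain untransposed matrices; the function name and the fixed alpha/beta are illustrative assumptions, not marian code.

#include <cublas_v2.h>

// Row-major C[m x n] = A[m x k] * B[k x n] via column-major cuBLAS.
void rowMajorSgemm(cublasHandle_t handle, int m, int n, int k,
                   const float *A, const float *B, float *C)
{
    const float alpha = 1.0f, beta = 0.0f;
    // Column-major view of the same memory: C^T (n x m) = B^T (n x k) * A^T (k x m)
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                n, m, k,
                &alpha,
                B, n,   // ldb: row length of row-major B
                A, k,   // lda: row length of row-major A
                &beta,
                C, n);  // ldc: row length of row-major C
}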
0f9f9b696e7cf6dbfbacea01157699dc6fe6897e.hip
// !!! This is a file automatically generated by hipify!!! /*! ************************************************************************************************** * Deformable DETR * Copyright (c) 2020 SenseTime. All Rights Reserved. * Licensed under the Apache License, Version 2.0 [see LICENSE for details] ************************************************************************************************** * Modified from *https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 ************************************************************************************************** */ #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <THH/THHAtomics.cuh> #include <ms_deform_attn_cuda_kernel.cuh> #include <vector> template <typename scalar_t> void ms_deformable_im2col_cuda(hipStream_t stream, const scalar_t *data_value, const int64_t *data_spatial_shapes, const int64_t *data_level_start_index, const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight, const int batch_size, const int spatial_size, const int num_heads, const int channels, const int num_levels, const int num_query, const int num_point, scalar_t *data_col) { const int num_kernels = batch_size * num_query * num_heads * channels; const int num_actual_kernels = batch_size * num_query * num_heads * channels; const int num_threads = CUDA_NUM_THREADS; hipLaunchKernelGGL(( ms_deformable_im2col_gpu_kernel<scalar_t>) , dim3(GET_BLOCKS(num_actual_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, data_col); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in ms_deformable_im2col_cuda: %s\n", hipGetErrorString(err)); } } template <typename scalar_t> void ms_deformable_col2im_cuda( hipStream_t stream, const scalar_t *grad_col, const scalar_t *data_value, const int64_t *data_spatial_shapes, const int64_t *data_level_start_index, const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight, const int batch_size, const int spatial_size, const int num_heads, const int channels, const int num_levels, const int num_query, const int num_point, scalar_t *grad_value, scalar_t *grad_sampling_loc, scalar_t *grad_attn_weight) { const int num_threads = (channels > CUDA_NUM_THREADS) ? 
CUDA_NUM_THREADS : channels; const int num_kernels = batch_size * num_query * num_heads * channels; const int num_actual_kernels = batch_size * num_query * num_heads * channels; if (channels > 1024) { if ((channels & 1023) == 0) { hipLaunchKernelGGL(( ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks<scalar_t>) , dim3(GET_BLOCKS(num_actual_kernels, num_threads)), dim3(num_threads), num_threads * 3 * sizeof(scalar_t), stream, num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); } else { hipLaunchKernelGGL(( ms_deformable_col2im_gpu_kernel_gm<scalar_t>) , dim3(GET_BLOCKS(num_actual_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); } } else { switch (channels) { case 1: hipLaunchKernelGGL(( ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 1>) , dim3(GET_BLOCKS(num_actual_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 2: hipLaunchKernelGGL(( ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 2>) , dim3(GET_BLOCKS(num_actual_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 4: hipLaunchKernelGGL(( ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 4>) , dim3(GET_BLOCKS(num_actual_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 8: hipLaunchKernelGGL(( ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 8>) , dim3(GET_BLOCKS(num_actual_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 16: hipLaunchKernelGGL(( ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 16>) , dim3(GET_BLOCKS(num_actual_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 32: hipLaunchKernelGGL(( ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 32>) , dim3(GET_BLOCKS(num_actual_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, grad_col, 
data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 64: hipLaunchKernelGGL(( ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 64>) , dim3(GET_BLOCKS(num_actual_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 128: hipLaunchKernelGGL(( ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 128>) , dim3(GET_BLOCKS(num_actual_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 256: hipLaunchKernelGGL(( ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 256>) , dim3(GET_BLOCKS(num_actual_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 512: hipLaunchKernelGGL(( ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 512>) , dim3(GET_BLOCKS(num_actual_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 1024: hipLaunchKernelGGL(( ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 1024>) , dim3(GET_BLOCKS(num_actual_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; default: if (channels < 64) { hipLaunchKernelGGL(( ms_deformable_col2im_gpu_kernel_shm_reduce_v1<scalar_t>) , dim3(GET_BLOCKS(num_actual_kernels, num_threads)), dim3(num_threads), num_threads * 3 * sizeof(scalar_t), stream, num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); } else { hipLaunchKernelGGL(( ms_deformable_col2im_gpu_kernel_shm_reduce_v2<scalar_t>) , dim3(GET_BLOCKS(num_actual_kernels, num_threads)), dim3(num_threads), num_threads * 3 * sizeof(scalar_t), stream, num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); } } } hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in ms_deformable_col2im_cuda: %s\n", hipGetErrorString(err)); } } at::Tensor 
ms_deform_attn_cuda_forward(const at::Tensor &value, const at::Tensor &spatial_shapes, const at::Tensor &level_start_index, const at::Tensor &sampling_loc, const at::Tensor &attn_weight, const int im2col_step) { AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous"); AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous"); AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous"); AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous"); AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor"); AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); const int batch = value.size(0); const int spatial_size = value.size(1); const int num_heads = value.size(2); const int channels = value.size(3); const int num_levels = spatial_shapes.size(0); const int num_query = sampling_loc.size(1); const int num_point = sampling_loc.size(4); const int im2col_step_ = ::min(batch, im2col_step); AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); auto output = at::zeros({batch, num_query, num_heads, channels}, value.options()); const int batch_n = im2col_step_; auto output_n = output.view( {batch / im2col_step_, batch_n, num_query, num_heads, channels}); auto per_value_size = spatial_size * num_heads * channels; auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; for (int n = 0; n < batch / im2col_step_; ++n) { auto columns = output_n.select(0, n); AT_DISPATCH_FLOATING_TYPES( value.type(), "ms_deform_attn_forward_cuda", ([&] { ms_deformable_im2col_cuda( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), value.data<scalar_t>() + n * im2col_step_ * per_value_size, spatial_shapes.data<int64_t>(), level_start_index.data<int64_t>(), sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size, batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, columns.data<scalar_t>()); })); } output = output.view({batch, num_query, num_heads * channels}); return output; } void ms_deform_attn_cuda_backward( const at::Tensor &value, const at::Tensor &spatial_shapes, const at::Tensor &level_start_index, const at::Tensor &sampling_loc, const at::Tensor &attn_weight, const at::Tensor &grad_output, at::Tensor &grad_value, at::Tensor &grad_sampling_loc, at::Tensor &grad_attn_weight, const int im2col_step) { AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous"); AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous"); AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous"); AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous"); AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous"); AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); 
AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor"); AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); AT_ASSERTM(grad_output.type().is_cuda(), "grad_output must be a CUDA tensor"); const int batch = value.size(0); const int spatial_size = value.size(1); const int num_heads = value.size(2); const int channels = value.size(3); const int num_levels = spatial_shapes.size(0); const int num_query = sampling_loc.size(1); const int num_point = sampling_loc.size(4); const int im2col_step_ = ::min(batch, im2col_step); AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); const int batch_n = im2col_step_; auto per_value_size = spatial_size * num_heads * channels; auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; auto grad_output_n = grad_output.view( {batch / im2col_step_, batch_n, num_query, num_heads, channels}); for (int n = 0; n < batch / im2col_step_; ++n) { auto grad_output_g = grad_output_n.select(0, n); AT_DISPATCH_FLOATING_TYPES( value.type(), "ms_deform_attn_backward_cuda", ([&] { ms_deformable_col2im_cuda( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_output_g.data<scalar_t>(), value.data<scalar_t>() + n * im2col_step_ * per_value_size, spatial_shapes.data<int64_t>(), level_start_index.data<int64_t>(), sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size, batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value.data<scalar_t>() + n * im2col_step_ * per_value_size, grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size); })); } }
0f9f9b696e7cf6dbfbacea01157699dc6fe6897e.cu
/*! ************************************************************************************************** * Deformable DETR * Copyright (c) 2020 SenseTime. All Rights Reserved. * Licensed under the Apache License, Version 2.0 [see LICENSE for details] ************************************************************************************************** * Modified from *https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 ************************************************************************************************** */ #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <cuda.h> #include <cuda_runtime.h> #include <THC/THCAtomics.cuh> #include <ms_deform_attn_cuda_kernel.cuh> #include <vector> template <typename scalar_t> void ms_deformable_im2col_cuda(cudaStream_t stream, const scalar_t *data_value, const int64_t *data_spatial_shapes, const int64_t *data_level_start_index, const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight, const int batch_size, const int spatial_size, const int num_heads, const int channels, const int num_levels, const int num_query, const int num_point, scalar_t *data_col) { const int num_kernels = batch_size * num_query * num_heads * channels; const int num_actual_kernels = batch_size * num_query * num_heads * channels; const int num_threads = CUDA_NUM_THREADS; ms_deformable_im2col_gpu_kernel<scalar_t> <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>( num_kernels, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, data_col); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in ms_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); } } template <typename scalar_t> void ms_deformable_col2im_cuda( cudaStream_t stream, const scalar_t *grad_col, const scalar_t *data_value, const int64_t *data_spatial_shapes, const int64_t *data_level_start_index, const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight, const int batch_size, const int spatial_size, const int num_heads, const int channels, const int num_levels, const int num_query, const int num_point, scalar_t *grad_value, scalar_t *grad_sampling_loc, scalar_t *grad_attn_weight) { const int num_threads = (channels > CUDA_NUM_THREADS) ? 
CUDA_NUM_THREADS : channels; const int num_kernels = batch_size * num_query * num_heads * channels; const int num_actual_kernels = batch_size * num_query * num_heads * channels; if (channels > 1024) { if ((channels & 1023) == 0) { ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks<scalar_t> <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, num_threads * 3 * sizeof(scalar_t), stream>>>( num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); } else { ms_deformable_col2im_gpu_kernel_gm<scalar_t> <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); } } else { switch (channels) { case 1: ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 1> <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 2: ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 2> <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 4: ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 4> <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 8: ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 8> <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 16: ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 16> <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 32: ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 32> <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 64: 
ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 64> <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 128: ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 128> <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 256: ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 256> <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 512: ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 512> <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; case 1024: ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 1024> <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); break; default: if (channels < 64) { ms_deformable_col2im_gpu_kernel_shm_reduce_v1<scalar_t> <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, num_threads * 3 * sizeof(scalar_t), stream>>>( num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); } else { ms_deformable_col2im_gpu_kernel_shm_reduce_v2<scalar_t> <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, num_threads * 3 * sizeof(scalar_t), stream>>>( num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value, grad_sampling_loc, grad_attn_weight); } } } cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in ms_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); } } at::Tensor ms_deform_attn_cuda_forward(const at::Tensor &value, const at::Tensor &spatial_shapes, const at::Tensor &level_start_index, const at::Tensor &sampling_loc, const at::Tensor &attn_weight, const int im2col_step) { AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous"); AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be 
contiguous"); AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous"); AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous"); AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor"); AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); const int batch = value.size(0); const int spatial_size = value.size(1); const int num_heads = value.size(2); const int channels = value.size(3); const int num_levels = spatial_shapes.size(0); const int num_query = sampling_loc.size(1); const int num_point = sampling_loc.size(4); const int im2col_step_ = std::min(batch, im2col_step); AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); auto output = at::zeros({batch, num_query, num_heads, channels}, value.options()); const int batch_n = im2col_step_; auto output_n = output.view( {batch / im2col_step_, batch_n, num_query, num_heads, channels}); auto per_value_size = spatial_size * num_heads * channels; auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; for (int n = 0; n < batch / im2col_step_; ++n) { auto columns = output_n.select(0, n); AT_DISPATCH_FLOATING_TYPES( value.type(), "ms_deform_attn_forward_cuda", ([&] { ms_deformable_im2col_cuda( at::cuda::getCurrentCUDAStream(), value.data<scalar_t>() + n * im2col_step_ * per_value_size, spatial_shapes.data<int64_t>(), level_start_index.data<int64_t>(), sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size, batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, columns.data<scalar_t>()); })); } output = output.view({batch, num_query, num_heads * channels}); return output; } void ms_deform_attn_cuda_backward( const at::Tensor &value, const at::Tensor &spatial_shapes, const at::Tensor &level_start_index, const at::Tensor &sampling_loc, const at::Tensor &attn_weight, const at::Tensor &grad_output, at::Tensor &grad_value, at::Tensor &grad_sampling_loc, at::Tensor &grad_attn_weight, const int im2col_step) { AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous"); AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous"); AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous"); AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous"); AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous"); AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor"); AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); AT_ASSERTM(grad_output.type().is_cuda(), "grad_output must be a CUDA tensor"); const int batch = value.size(0); const int spatial_size = value.size(1); const int 
num_heads = value.size(2); const int channels = value.size(3); const int num_levels = spatial_shapes.size(0); const int num_query = sampling_loc.size(1); const int num_point = sampling_loc.size(4); const int im2col_step_ = std::min(batch, im2col_step); AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); const int batch_n = im2col_step_; auto per_value_size = spatial_size * num_heads * channels; auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; auto grad_output_n = grad_output.view( {batch / im2col_step_, batch_n, num_query, num_heads, channels}); for (int n = 0; n < batch / im2col_step_; ++n) { auto grad_output_g = grad_output_n.select(0, n); AT_DISPATCH_FLOATING_TYPES( value.type(), "ms_deform_attn_backward_cuda", ([&] { ms_deformable_col2im_cuda( at::cuda::getCurrentCUDAStream(), grad_output_g.data<scalar_t>(), value.data<scalar_t>() + n * im2col_step_ * per_value_size, spatial_shapes.data<int64_t>(), level_start_index.data<int64_t>(), sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size, batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value.data<scalar_t>() + n * im2col_step_ * per_value_size, grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size); })); } }
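Both dispatch functions above rely on CUDA_NUM_THREADS and GET_BLOCKS from ms_deform_attn_cuda_kernel.cuh, which is not shown here. The conventional definitions, assumed below purely for illustration, launch one thread per output element and round the block count up.

const int CUDA_NUM_THREADS = 1024;   // assumption: the header's actual value may differ

inline int GET_BLOCKS(const int N, const int num_threads)
{
    return (N + num_threads - 1) / num_threads;   // ceil(N / num_threads)
}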
11a2bec942b979d76c5cdea8b842b4cc573b1309.hip
// !!! This is a file automatically generated by hipify!!! ///524288/float32/input0,524288/float32/input1:524288/float32/output0 // backend = c-cuda // CONFIG: // COMPUTE_V1: - einstein_v2("output0[N] = input0[N] + input1[N]", input_dict={"input0": {"dtype": "float32", "shape": [1024 * 512]}, "input1": {"dtype": "float32", "shape": [1024 * 512]}}) #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <mma.h> #include <stdio.h> #include <stdlib.h> #include <chrono> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <algorithm> const int input_size = 4096; const int output_size = 32 * 1024 * 1024; #include "cu_helper.h" __global__ void Broadcast(float* __restrict__ input0, float* __restrict__ output0) { // [thread_extent] blockIdx.x = 1024 // [thread_extent] threadIdx.x = 4 // [thread_extent] blockIdx.y = 64 // [thread_extent] threadIdx.y = 128 output0[(((((((int)blockIdx.x) * 32768) + (((int)threadIdx.x) * 8192)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)))] = input0[(((((int)blockIdx.x) * 4) + ((int)threadIdx.x)))]; } __global__ void Broadcast1(float* __restrict__ input0, float* __restrict__ output0) { // [thread_extent] blockIdx.x = 512 // [thread_extent] threadIdx.x = 2 // [thread_extent] blockIdx.y = 64 // [thread_extent] threadIdx.y = 64 output0[(((((((int)blockIdx.x) * 65536) + (((int)threadIdx.x) * 32768)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)))] = input0[(((((int)blockIdx.x) * 8) + (((int)threadIdx.x) * 4)))]; output0[((((((((int)blockIdx.x) * 65536) + (((int)threadIdx.x) * 32768)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)) + 64))] = input0[(((((int)blockIdx.x) * 8) + (((int)threadIdx.x) * 4)))]; output0[((((((((int)blockIdx.x) * 65536) + (((int)threadIdx.x) * 32768)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)) + 8192))] = input0[((((((int)blockIdx.x) * 8) + (((int)threadIdx.x) * 4)) + 1))]; output0[((((((((int)blockIdx.x) * 65536) + (((int)threadIdx.x) * 32768)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)) + 8256))] = input0[((((((int)blockIdx.x) * 8) + (((int)threadIdx.x) * 4)) + 1))]; output0[((((((((int)blockIdx.x) * 65536) + (((int)threadIdx.x) * 32768)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)) + 16384))] = input0[((((((int)blockIdx.x) * 8) + (((int)threadIdx.x) * 4)) + 2))]; output0[((((((((int)blockIdx.x) * 65536) + (((int)threadIdx.x) * 32768)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)) + 16448))] = input0[((((((int)blockIdx.x) * 8) + (((int)threadIdx.x) * 4)) + 2))]; output0[((((((((int)blockIdx.x) * 65536) + (((int)threadIdx.x) * 32768)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)) + 24576))] = input0[((((((int)blockIdx.x) * 8) + (((int)threadIdx.x) * 4)) + 3))]; output0[((((((((int)blockIdx.x) * 65536) + (((int)threadIdx.x) * 32768)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)) + 24640))] = input0[((((((int)blockIdx.x) * 8) + (((int)threadIdx.x) * 4)) + 3))]; } bool check(float *A, float *B) { for (int i = 0; i < 4096; ++ i) for (int j = 0; j < 8192; ++ j) if (abs(A[i] - B[i * 8192 + j]) > 1e-6) { fprintf(stderr, "%d %d %f %f\n", i, j, A[i], B[i * 8192 + j]); return false; } return true; } int main(int argc, char *argv[]) { checkCudaErrors(hipInit(0)); hipDevice_t device; checkCudaErrors(hipDeviceGet(&device, 0)); hipCtx_t context; checkCudaErrors(hipCtxCreate(&context, HIP_CTX_SCHED_AUTO/*HIP_CTX_SCHED_YIELD*/ | HIP_CTX_MAP_HOST, device)); float *Ah, *Bh; float *Ad, *Bd; Ah = (float*)malloc(input_size * sizeof(float)); Bh = (float*)malloc(output_size * sizeof(float)); 
hipMalloc((void **)&Ad, input_size * sizeof(float)); hipMalloc((void **)&Bd, output_size * sizeof(float)); for (int i = 0; i < input_size; ++ i) Ah[i] = rand(); hipMemcpy(Ad, Ah, input_size * sizeof(float), hipMemcpyHostToDevice); dim3 Grid(1024, 64, 1); dim3 Block(4, 128, 1); for (int i = 0; i < 1; ++ i) { hipLaunchKernelGGL(( Broadcast) , dim3(Grid), dim3(Block), 0, 0, Ad, Bd); hipDeviceSynchronize(); } hipMemcpy(Bh, Bd, output_size * sizeof(float), hipMemcpyDeviceToHost); if (!check(Ah, Bh)) fprintf(stderr, "error!\n"); else fprintf(stderr, "pass!\n"); dim3 Grid1(512, 64, 1); dim3 Block1(2, 64, 1); for (int i = 0; i < 1; ++ i) { hipLaunchKernelGGL(( Broadcast1) , dim3(Grid1), dim3(Block1), 0, 0, Ad, Bd); hipDeviceSynchronize(); } hipMemcpy(Bh, Bd, output_size * sizeof(float), hipMemcpyDeviceToHost); if (!check(Ah, Bh)) fprintf(stderr, "error!\n"); else fprintf(stderr, "pass!\n"); }
11a2bec942b979d76c5cdea8b842b4cc573b1309.cu
///524288/float32/input0,524288/float32/input1:524288/float32/output0 // backend = c-cuda // CONFIG: // COMPUTE_V1: - einstein_v2("output0[N] = input0[N] + input1[N]", input_dict={"input0": {"dtype": "float32", "shape": [1024 * 512]}, "input1": {"dtype": "float32", "shape": [1024 * 512]}}) #include <cuda_runtime.h> #include <cuda_fp16.h> #include <mma.h> #include <stdio.h> #include <stdlib.h> #include <chrono> #include <cuda.h> #include <cuda_runtime_api.h> #include <algorithm> const int input_size = 4096; const int output_size = 32 * 1024 * 1024; #include "cu_helper.h" __global__ void Broadcast(float* __restrict__ input0, float* __restrict__ output0) { // [thread_extent] blockIdx.x = 1024 // [thread_extent] threadIdx.x = 4 // [thread_extent] blockIdx.y = 64 // [thread_extent] threadIdx.y = 128 output0[(((((((int)blockIdx.x) * 32768) + (((int)threadIdx.x) * 8192)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)))] = input0[(((((int)blockIdx.x) * 4) + ((int)threadIdx.x)))]; } __global__ void Broadcast1(float* __restrict__ input0, float* __restrict__ output0) { // [thread_extent] blockIdx.x = 512 // [thread_extent] threadIdx.x = 2 // [thread_extent] blockIdx.y = 64 // [thread_extent] threadIdx.y = 64 output0[(((((((int)blockIdx.x) * 65536) + (((int)threadIdx.x) * 32768)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)))] = input0[(((((int)blockIdx.x) * 8) + (((int)threadIdx.x) * 4)))]; output0[((((((((int)blockIdx.x) * 65536) + (((int)threadIdx.x) * 32768)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)) + 64))] = input0[(((((int)blockIdx.x) * 8) + (((int)threadIdx.x) * 4)))]; output0[((((((((int)blockIdx.x) * 65536) + (((int)threadIdx.x) * 32768)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)) + 8192))] = input0[((((((int)blockIdx.x) * 8) + (((int)threadIdx.x) * 4)) + 1))]; output0[((((((((int)blockIdx.x) * 65536) + (((int)threadIdx.x) * 32768)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)) + 8256))] = input0[((((((int)blockIdx.x) * 8) + (((int)threadIdx.x) * 4)) + 1))]; output0[((((((((int)blockIdx.x) * 65536) + (((int)threadIdx.x) * 32768)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)) + 16384))] = input0[((((((int)blockIdx.x) * 8) + (((int)threadIdx.x) * 4)) + 2))]; output0[((((((((int)blockIdx.x) * 65536) + (((int)threadIdx.x) * 32768)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)) + 16448))] = input0[((((((int)blockIdx.x) * 8) + (((int)threadIdx.x) * 4)) + 2))]; output0[((((((((int)blockIdx.x) * 65536) + (((int)threadIdx.x) * 32768)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)) + 24576))] = input0[((((((int)blockIdx.x) * 8) + (((int)threadIdx.x) * 4)) + 3))]; output0[((((((((int)blockIdx.x) * 65536) + (((int)threadIdx.x) * 32768)) + (((int)blockIdx.y) * 128)) + ((int)threadIdx.y)) + 24640))] = input0[((((((int)blockIdx.x) * 8) + (((int)threadIdx.x) * 4)) + 3))]; } bool check(float *A, float *B) { for (int i = 0; i < 4096; ++ i) for (int j = 0; j < 8192; ++ j) if (abs(A[i] - B[i * 8192 + j]) > 1e-6) { fprintf(stderr, "%d %d %f %f\n", i, j, A[i], B[i * 8192 + j]); return false; } return true; } int main(int argc, char *argv[]) { checkCudaErrors(cuInit(0)); CUdevice device; checkCudaErrors(cuDeviceGet(&device, 0)); CUcontext context; checkCudaErrors(cuCtxCreate(&context, CU_CTX_SCHED_AUTO/*CU_CTX_SCHED_YIELD*/ | CU_CTX_MAP_HOST, device)); float *Ah, *Bh; float *Ad, *Bd; Ah = (float*)malloc(input_size * sizeof(float)); Bh = (float*)malloc(output_size * sizeof(float)); cudaMalloc((void **)&Ad, input_size * sizeof(float)); cudaMalloc((void **)&Bd, output_size 
* sizeof(float)); for (int i = 0; i < input_size; ++ i) Ah[i] = rand(); cudaMemcpy(Ad, Ah, input_size * sizeof(float), cudaMemcpyHostToDevice); dim3 Grid(1024, 64, 1); dim3 Block(4, 128, 1); for (int i = 0; i < 1; ++ i) { Broadcast <<<Grid, Block>>> (Ad, Bd); cudaDeviceSynchronize(); } cudaMemcpy(Bh, Bd, output_size * sizeof(float), cudaMemcpyDeviceToHost); if (!check(Ah, Bh)) fprintf(stderr, "error!\n"); else fprintf(stderr, "pass!\n"); dim3 Grid1(512, 64, 1); dim3 Block1(2, 64, 1); for (int i = 0; i < 1; ++ i) { Broadcast1 <<<Grid1, Block1>>> (Ad, Bd); cudaDeviceSynchronize(); } cudaMemcpy(Bh, Bd, output_size * sizeof(float), cudaMemcpyDeviceToHost); if (!check(Ah, Bh)) fprintf(stderr, "error!\n"); else fprintf(stderr, "pass!\n"); }
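The flat index arithmetic in the first Broadcast kernel above is easier to see when re-derived on the host. The stand-alone sketch below is purely illustrative and not part of the original test: it walks the same grid (1024, 64) and block (4, 128) shape and checks that every output index decomposes into i*8192 + j with input index i, which is exactly the relationship check() verifies.

#include <assert.h>

int main() {
    for (int bx = 0; bx < 1024; ++bx)
        for (int tx = 0; tx < 4; ++tx)
            for (int by = 0; by < 64; ++by)
                for (int ty = 0; ty < 128; ++ty) {
                    int out = bx * 32768 + tx * 8192 + by * 128 + ty; // kernel's output index
                    int in  = bx * 4 + tx;                            // kernel's input index
                    int i = in;                  // broadcast source element
                    int j = by * 128 + ty;       // position within the broadcast row
                    assert(out == i * 8192 + j); // same mapping that check() expects
                }
    return 0;
}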
111dfb1df01fe1feadb7662c89e3ac38d0e9281b.hip
// !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdio.h>
#include <assert.h>

// Here you can set the device ID that was assigned to you
#define MYDEVICE 0

// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);

///////////////////////////////////////////////////////////////////////////////
// Program main
///////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
    hipSetDevice(MYDEVICE);

    // pointer and dimension for host memory
    int n, dimA;
    float *h_a;

    // pointers for device memory
    float *d_a, *d_b;

    // allocate and initialize host memory
    // Bonus: try using hipHostMalloc in place of malloc
    dimA = 8;
    h_a = (float *) malloc(dimA*sizeof(float));
    for (n=0; n<dimA; n++)
    {
        h_a[n] = (float) n;
    }

    // Part 1 of 5: allocate device memory
    size_t memSize = dimA*sizeof(float);
    hipMalloc((void **)&d_a, memSize);
    hipMalloc((void **)&d_b, memSize);

    // Part 2 of 5: host to device memory copy
    hipMemcpy(d_a, h_a, memSize, hipMemcpyHostToDevice);

    // Part 3 of 5: device to device memory copy
    hipMemcpy(d_b, d_a, memSize, hipMemcpyDeviceToDevice);

    // clear host memory
    for (n=0; n<dimA; n++)
    {
        h_a[n] = 0.f;
    }

    // Part 4 of 5: device to host copy
    hipMemcpy(h_a, d_a, memSize, hipMemcpyDeviceToHost);

    // Check for any CUDA errors
    checkCUDAError("hipMemcpy calls");

    // verify the data on the host is correct
    for (n=0; n<dimA; n++)
    {
        assert(h_a[n] == (float) n);
    }

    // Part 5 of 5: free device memory pointers d_a and d_b
    hipFree(d_a);
    hipFree(d_b);

    // Check for any CUDA errors
    checkCUDAError("hipFree");

    // free host memory pointer h_a
    free(h_a);

    // If the program makes it this far, then the results are correct and
    // there are no run-time errors. Good work!
    printf("Correct!\n");

    return 0;
}

void checkCUDAError(const char *msg)
{
    hipError_t err = hipGetLastError();
    if( hipSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
        exit(-1);
    }
}
111dfb1df01fe1feadb7662c89e3ac38d0e9281b.cu
// includes, system
#include <stdio.h>
#include <assert.h>

// Here you can set the device ID that was assigned to you
#define MYDEVICE 0

// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);

///////////////////////////////////////////////////////////////////////////////
// Program main
///////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
    cudaSetDevice(MYDEVICE);

    // pointer and dimension for host memory
    int n, dimA;
    float *h_a;

    // pointers for device memory
    float *d_a, *d_b;

    // allocate and initialize host memory
    // Bonus: try using cudaMallocHost in place of malloc
    dimA = 8;
    h_a = (float *) malloc(dimA*sizeof(float));
    for (n=0; n<dimA; n++)
    {
        h_a[n] = (float) n;
    }

    // Part 1 of 5: allocate device memory
    size_t memSize = dimA*sizeof(float);
    cudaMalloc((void **)&d_a, memSize);
    cudaMalloc((void **)&d_b, memSize);

    // Part 2 of 5: host to device memory copy
    cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);

    // Part 3 of 5: device to device memory copy
    cudaMemcpy(d_b, d_a, memSize, cudaMemcpyDeviceToDevice);

    // clear host memory
    for (n=0; n<dimA; n++)
    {
        h_a[n] = 0.f;
    }

    // Part 4 of 5: device to host copy
    cudaMemcpy(h_a, d_a, memSize, cudaMemcpyDeviceToHost);

    // Check for any CUDA errors
    checkCUDAError("cudaMemcpy calls");

    // verify the data on the host is correct
    for (n=0; n<dimA; n++)
    {
        assert(h_a[n] == (float) n);
    }

    // Part 5 of 5: free device memory pointers d_a and d_b
    cudaFree(d_a);
    cudaFree(d_b);

    // Check for any CUDA errors
    checkCUDAError("cudaFree");

    // free host memory pointer h_a
    free(h_a);

    // If the program makes it this far, then the results are correct and
    // there are no run-time errors. Good work!
    printf("Correct!\n");

    return 0;
}

void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(-1);
    }
}
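The comments in the exercise above suggest trying cudaMallocHost in place of malloc. The snippet below is a minimal, illustrative variant of that bonus and is not part of the original file: page-locked host memory is allocated with cudaMallocHost and released with cudaFreeHost, and it typically speeds up cudaMemcpy because the driver can DMA directly from the pinned buffer.

#include <stdio.h>
#include <cuda_runtime.h>

int main() {
    const int dimA = 8;
    size_t memSize = dimA * sizeof(float);
    float *h_a = NULL, *d_a = NULL;

    cudaMallocHost((void **)&h_a, memSize);   // pinned host buffer instead of malloc()
    for (int n = 0; n < dimA; n++) h_a[n] = (float)n;

    cudaMalloc((void **)&d_a, memSize);
    cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
    cudaMemcpy(h_a, d_a, memSize, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFreeHost(h_a);                        // pair cudaMallocHost with cudaFreeHost
    printf("pinned-memory variant done\n");
    return 0;
}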
f21f73554ffe692c7ec5213166d31e63f0bbeb8d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>

__global__ void index_print_kernel()
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int warp_idx = threadIdx.x / warpSize;
    int lane_idx = threadIdx.x & (warpSize - 1);

    if ((lane_idx & (warpSize / 2 - 1)) == 0) {
        // thread, block, warp, lane
        printf(" %5d\t%5d\t %2d\t%2d\n", idx, blockIdx.x, warp_idx, lane_idx);
    }
}

int main(int argc, char* argv[])
{
    if (argc == 1) {
        puts("Please put Block Size and Thread Block Size..");
        puts("./cuda_thread_block [grid size] [block size]");
        puts("e.g.) ./cuda_thread_block 4 128");
        exit(1);
    }

    int gridSize = atoi(argv[1]);
    int blockSize = atoi(argv[2]);

    puts("thread, block, warp, lane");
    hipLaunchKernelGGL(( index_print_kernel), dim3(gridSize), dim3(blockSize), 0, 0, );
    hipDeviceSynchronize();

    return 0;
}
f21f73554ffe692c7ec5213166d31e63f0bbeb8d.cu
#include <stdio.h>
#include <stdlib.h>

__global__ void index_print_kernel()
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int warp_idx = threadIdx.x / warpSize;
    int lane_idx = threadIdx.x & (warpSize - 1);

    if ((lane_idx & (warpSize / 2 - 1)) == 0) {
        // thread, block, warp, lane
        printf(" %5d\t%5d\t %2d\t%2d\n", idx, blockIdx.x, warp_idx, lane_idx);
    }
}

int main(int argc, char* argv[])
{
    if (argc == 1) {
        puts("Please put Block Size and Thread Block Size..");
        puts("./cuda_thread_block [grid size] [block size]");
        puts("e.g.) ./cuda_thread_block 4 128");
        exit(1);
    }

    int gridSize = atoi(argv[1]);
    int blockSize = atoi(argv[2]);

    puts("thread, block, warp, lane");
    index_print_kernel<<<gridSize, blockSize>>>();
    cudaDeviceSynchronize();

    return 0;
}
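To see which threads the kernel above lets through its lane filter, the host-only sketch below recomputes the same warp and lane indices (assuming warpSize is 32, which holds on current NVIDIA GPUs): the mask warpSize/2 - 1 equals 15, so only lanes 0 and 16 of each warp print. This is an illustration, not part of the original example.

#include <stdio.h>

int main() {
    const int warp_size = 32;       // assumption: warpSize == 32 on the device
    const int block_size = 64;      // e.g. ./cuda_thread_block 1 64
    for (int tid = 0; tid < block_size; ++tid) {
        int warp_idx = tid / warp_size;
        int lane_idx = tid & (warp_size - 1);
        if ((lane_idx & (warp_size / 2 - 1)) == 0)   // same filter as the kernel
            printf("thread %2d -> warp %d, lane %2d\n", tid, warp_idx, lane_idx);
    }
    return 0;
}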
afa8a76534da828735e44ae369e02524d2ab6e6e.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstdlib> #include <cstdio> #include <time.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include<cuda_runtime.h> #include<device_launch_parameters.h> #define BLOCKSIZE_x 32 #define BLOCKSIZE_y 32 #define l 100 #define dt 0.01 #define D 10.0 #define d 0.4 //using namespace std; double my_gettimeofday(){ struct timeval tmp_time; gettimeofday(&tmp_time, NULL); return tmp_time.tv_sec + (tmp_time.tv_usec * 1.0e-6L); } /*****************/ /* CUDA MEMCHECK */ /*****************/ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %dn", hipGetErrorString(code), file, line); if (abort) { exit(code); } } } int iDivUp(int hostPtr, int b){ return ((hostPtr % b) != 0) ? (hostPtr / b + 1) : (hostPtr / b); } __global__ void solve(double A[l][l]) { int i = threadIdx.x+blockIdx.x * blockDim.x; int j = threadIdx.y+blockIdx.y * blockDim.y; if( i < l-1 && j < l-1 && (i!=0) && (j!=0) ) { A[i][j] = A[i][j]*(1-d*dt)/l+(A[i-1][j] + A[i+1][j] + A[i][j+1] + A[i][j-1] - 4*A[i][j])*D*dt/l; } } int main(){ float phi0=0.4; double cpu_mesh[l][l]; double cpu_res[l][l]; double (*gpu_mesh)[l]; //pointers to arrays of dimension N double (*gpu_res)[l]; /* Initializing cpu_mesh with source at the center*/ for(int i=0 ; i< l; i++){ for(int j=0 ; j<l ; ++j){ cpu_mesh[i][j]=0.0; } } cpu_mesh[l/2-1][l/2 -1]=phi0; cpu_mesh[l/2][l/2 -1]=phi0; cpu_mesh[l/2][l/2]=phi0; cpu_mesh[l/2-1][l/2]=phi0; /* Allocation */ hipMalloc((void**)&gpu_mesh, (l*l)*sizeof(double)); hipMalloc((void**)&gpu_res, (l*l)*sizeof(double)); //copying from host to device double debut = my_gettimeofday(); double debutTransfert = my_gettimeofday(); gpuErrchk(hipMemcpy(gpu_mesh, cpu_mesh, (l*l)*sizeof(double), hipMemcpyHostToDevice)); double finTransfert = my_gettimeofday(); std::cout << "Transfert CPU vers GPU :" << finTransfert-debutTransfert << std::endl; dim3 gridSize(iDivUp(l, BLOCKSIZE_x), iDivUp(l, BLOCKSIZE_y)); dim3 blockSize(BLOCKSIZE_y, BLOCKSIZE_x); //solve <<<gridSize, blockSize>>> (gpu_mesh, gpu_res, D, dt, d); for(int i=0; i<1000; ++i){ hipLaunchKernelGGL(( solve), dim3(gridSize), dim3(blockSize), 0, 0, gpu_mesh); } debutTransfert = my_gettimeofday(); hipMemcpy(cpu_res, gpu_mesh, (l*l)*sizeof(double), hipMemcpyDeviceToHost); std::cout << "Transfert GPU vers CPU :" << finTransfert-debutTransfert << std::endl; finTransfert = my_gettimeofday(); double fin= my_gettimeofday(); std::cout << "Temps calcul :" << fin-debut << std::endl; /*for (int i = 0; i < l; i++){ for (int j = 0; j < l; j++){ std::cout << cpu_res[i][j] << " "; } std::cout << std::endl; }*/ return 0; }
afa8a76534da828735e44ae369e02524d2ab6e6e.cu
#include <iostream> #include <cstdlib> #include <cstdio> #include <time.h> #include <sys/time.h> #include <cuda.h> #include<cuda_runtime.h> #include<device_launch_parameters.h> #define BLOCKSIZE_x 32 #define BLOCKSIZE_y 32 #define l 100 #define dt 0.01 #define D 10.0 #define d 0.4 //using namespace std; double my_gettimeofday(){ struct timeval tmp_time; gettimeofday(&tmp_time, NULL); return tmp_time.tv_sec + (tmp_time.tv_usec * 1.0e-6L); } /*****************/ /* CUDA MEMCHECK */ /*****************/ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %dn", cudaGetErrorString(code), file, line); if (abort) { exit(code); } } } int iDivUp(int hostPtr, int b){ return ((hostPtr % b) != 0) ? (hostPtr / b + 1) : (hostPtr / b); } __global__ void solve(double A[l][l]) { int i = threadIdx.x+blockIdx.x * blockDim.x; int j = threadIdx.y+blockIdx.y * blockDim.y; if( i < l-1 && j < l-1 && (i!=0) && (j!=0) ) { A[i][j] = A[i][j]*(1-d*dt)/l+(A[i-1][j] + A[i+1][j] + A[i][j+1] + A[i][j-1] - 4*A[i][j])*D*dt/l; } } int main(){ float phi0=0.4; double cpu_mesh[l][l]; double cpu_res[l][l]; double (*gpu_mesh)[l]; //pointers to arrays of dimension N double (*gpu_res)[l]; /* Initializing cpu_mesh with source at the center*/ for(int i=0 ; i< l; i++){ for(int j=0 ; j<l ; ++j){ cpu_mesh[i][j]=0.0; } } cpu_mesh[l/2-1][l/2 -1]=phi0; cpu_mesh[l/2][l/2 -1]=phi0; cpu_mesh[l/2][l/2]=phi0; cpu_mesh[l/2-1][l/2]=phi0; /* Allocation */ cudaMalloc((void**)&gpu_mesh, (l*l)*sizeof(double)); cudaMalloc((void**)&gpu_res, (l*l)*sizeof(double)); //copying from host to device double debut = my_gettimeofday(); double debutTransfert = my_gettimeofday(); gpuErrchk(cudaMemcpy(gpu_mesh, cpu_mesh, (l*l)*sizeof(double), cudaMemcpyHostToDevice)); double finTransfert = my_gettimeofday(); std::cout << "Transfert CPU vers GPU :" << finTransfert-debutTransfert << std::endl; dim3 gridSize(iDivUp(l, BLOCKSIZE_x), iDivUp(l, BLOCKSIZE_y)); dim3 blockSize(BLOCKSIZE_y, BLOCKSIZE_x); //solve <<<gridSize, blockSize>>> (gpu_mesh, gpu_res, D, dt, d); for(int i=0; i<1000; ++i){ solve<<<gridSize, blockSize>>> (gpu_mesh); } debutTransfert = my_gettimeofday(); cudaMemcpy(cpu_res, gpu_mesh, (l*l)*sizeof(double), cudaMemcpyDeviceToHost); std::cout << "Transfert GPU vers CPU :" << finTransfert-debutTransfert << std::endl; finTransfert = my_gettimeofday(); double fin= my_gettimeofday(); std::cout << "Temps calcul :" << fin-debut << std::endl; /*for (int i = 0; i < l; i++){ for (int j = 0; j < l; j++){ std::cout << cpu_res[i][j] << " "; } std::cout << std::endl; }*/ return 0; }
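For reference, one update step of solve() can be restated on the CPU as in the sketch below (illustrative only, not part of the original benchmark). Note that the kernel reads and writes A in place, so its result depends on thread ordering; a faithful host comparison would normally double-buffer, as done here.

#include <vector>

void solve_cpu_step(const std::vector<std::vector<double>> &in,
                    std::vector<std::vector<double>> &out,
                    int l, double D, double dt, double d) {
    // same stencil and scaling as the solve() kernel, interior points only
    for (int i = 1; i < l - 1; ++i)
        for (int j = 1; j < l - 1; ++j)
            out[i][j] = in[i][j] * (1 - d * dt) / l
                      + (in[i-1][j] + in[i+1][j] + in[i][j+1] + in[i][j-1]
                         - 4 * in[i][j]) * D * dt / l;
}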
3fcbed87e6bec93424fed087b74da1c7a89086f4.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2021, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include "lbann/operators/math/abs.hpp" #include "lbann/base.hpp" #include "lbann/utils/gpu/helpers.hpp" #include "common_hip.cuh" namespace lbann { namespace { template <typename DataT> struct AbsOpImpl { using ComplexT = thrust::complex<DataT>; inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::abs(x); } inline __device__ DataT operator()(ComplexT const& x) const { return thrust::abs(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return (x > (DataT) 0. ? dy : (x < (DataT) 0. ? -dy : (DataT) 0.)); } inline __device__ ComplexT operator()(ComplexT const& x, DataT const& dy) const { return (x == ComplexT(0.f) ? ComplexT(0.f) : thrust::conj(x * (dy / thrust::abs(x)))); } };// struct AbsOpImpl } // namespace template <typename DataT, El::Device Device> void AbsOperator<DataT, Device>::fp_compute_local( std::vector<ConstLocalInputTensorType> inputs, std::vector<LocalOutputTensorType> outputs) const { LBANN_ASSERT_DEBUG(inputs.size() == 1); LBANN_ASSERT_DEBUG(outputs.size() == 1); auto const& input = inputs.front().data(); auto& output = outputs.front().data(); El::EntrywiseMap(input, output, AbsOpImpl<El::Base<DataT>>{}); } template <typename DataT, El::Device Device> void AbsOperator<DataT, Device>::bp_compute_local( std::vector<ConstLocalInputTensorType> inputs, std::vector<ConstLocalOutputTensorType> grads_wrt_outputs, std::vector<LocalInputTensorType> grads_wrt_inputs) const { LBANN_ASSERT_DEBUG(inputs.size() == 1); LBANN_ASSERT_DEBUG(grads_wrt_outputs.size() == 1); LBANN_ASSERT_DEBUG(grads_wrt_inputs.size() == 1); auto const& input = inputs.front().data(); auto const& grad_wrt_output = grads_wrt_outputs.front().data(); auto& grad_wrt_input = grads_wrt_inputs.front().data(); internal::EntrywiseZipInto(input, grad_wrt_output, grad_wrt_input, AbsOpImpl<El::Base<DataT>>{}); } #define PROTO(T) template class AbsOperator<T, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" #undef LBANN_INSTANTIATE_GPU_HALF #undef PROTO #define PROTO(T) template class AbsOperator<El::Complex<T>, El::Device::GPU> #include "lbann/macros/instantiate.hpp" } // namespace lbann
3fcbed87e6bec93424fed087b74da1c7a89086f4.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2021, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include "lbann/operators/math/abs.hpp" #include "lbann/base.hpp" #include "lbann/utils/gpu/helpers.hpp" #include "common.cuh" namespace lbann { namespace { template <typename DataT> struct AbsOpImpl { using ComplexT = thrust::complex<DataT>; inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::abs(x); } inline __device__ DataT operator()(ComplexT const& x) const { return thrust::abs(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return (x > (DataT) 0. ? dy : (x < (DataT) 0. ? -dy : (DataT) 0.)); } inline __device__ ComplexT operator()(ComplexT const& x, DataT const& dy) const { return (x == ComplexT(0.f) ? ComplexT(0.f) : thrust::conj(x * (dy / thrust::abs(x)))); } };// struct AbsOpImpl } // namespace template <typename DataT, El::Device Device> void AbsOperator<DataT, Device>::fp_compute_local( std::vector<ConstLocalInputTensorType> inputs, std::vector<LocalOutputTensorType> outputs) const { LBANN_ASSERT_DEBUG(inputs.size() == 1); LBANN_ASSERT_DEBUG(outputs.size() == 1); auto const& input = inputs.front().data(); auto& output = outputs.front().data(); El::EntrywiseMap(input, output, AbsOpImpl<El::Base<DataT>>{}); } template <typename DataT, El::Device Device> void AbsOperator<DataT, Device>::bp_compute_local( std::vector<ConstLocalInputTensorType> inputs, std::vector<ConstLocalOutputTensorType> grads_wrt_outputs, std::vector<LocalInputTensorType> grads_wrt_inputs) const { LBANN_ASSERT_DEBUG(inputs.size() == 1); LBANN_ASSERT_DEBUG(grads_wrt_outputs.size() == 1); LBANN_ASSERT_DEBUG(grads_wrt_inputs.size() == 1); auto const& input = inputs.front().data(); auto const& grad_wrt_output = grads_wrt_outputs.front().data(); auto& grad_wrt_input = grads_wrt_inputs.front().data(); internal::EntrywiseZipInto(input, grad_wrt_output, grad_wrt_input, AbsOpImpl<El::Base<DataT>>{}); } #define PROTO(T) template class AbsOperator<T, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" #undef LBANN_INSTANTIATE_GPU_HALF #undef PROTO #define PROTO(T) template class AbsOperator<El::Complex<T>, El::Device::GPU> #include "lbann/macros/instantiate.hpp" } // namespace lbann
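As a sanity check on the complex backward rule in AbsOpImpl above (illustrative, using std::complex rather than LBANN's GPU types): the propagated value dy * conj(x) / |x| has real part equal to d|x|/dRe(x) and negated imaginary part equal to d|x|/dIm(x), which a finite-difference probe confirms.

#include <complex>
#include <cstdio>
#include <cmath>

int main() {
    std::complex<double> x(0.3, -1.2);
    double dy = 1.0, h = 1e-6;
    std::complex<double> grad = dy * std::conj(x) / std::abs(x);   // rule used in AbsOpImpl
    double d_re = (std::abs(x + std::complex<double>(h, 0)) - std::abs(x)) / h;
    double d_im = (std::abs(x + std::complex<double>(0, h)) - std::abs(x)) / h;
    std::printf("d|x|/dRe: analytic %f vs numeric %f\n", grad.real(), d_re);
    std::printf("d|x|/dIm: analytic %f vs numeric %f\n", -grad.imag(), d_im);
    return 0;
}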
d471e0336e73c152c7eac965eec692817575305b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "FullConnect.h" #include "../common/cuBase.h" #include "../common/cuMatrix.h" #include "../common/Config.h" #include "../layers/BranchLayer.h" #include <math.h> __global__ void g_FullConnectDropout(float * w, float * dropW, float* afterDropW, int len); __global__ void g_FullConnectFeedforward(float* acti, float* b, int NumofNeurons, int NONLIN); __global__ void g_FullConnectActi(float* acti, float* b, int NumofNeurons, int NONLIN); __global__ void g_FullConnectWgrad(float* wgrad, float* w, /*float* dropM,*/ int len, float lambda, int batch); __global__ void g_FullConnectActi(float* acti, float* b, int NumofNeurons, int NONLIN) { float* data = acti + blockIdx.x * NumofNeurons; for(int id = 0; id < NumofNeurons; id += blockDim.x) { int idx = id + threadIdx.x; if(idx < NumofNeurons) { float val = data[idx]; val = val + b[idx]; data[idx] = d_nonLinearity(val, NONLIN); } } } __global__ void g_FullConnectWgrad(float* wgrad, float* w, int len, float lambda, int batch) { for(int i = 0; i < len; i += blockDim.x * gridDim.x) { int id = i + blockDim.x * blockIdx.x + threadIdx.x; if(id < len) { if(fabs(lambda) < 1e-10) wgrad[id] = wgrad[id] / batch /** dropM[id]*/; else wgrad[id] = (wgrad[id] / batch + lambda * w[id]) /** dropM[id]*/; } } } /* * blocks : cuFullConnectActi[hl]->rows; * threads : dim3(min(512, len)); */ __global__ void g_FullConnectFeedforward(float* acti, float* b, int NumofNeurons, int NONLIN) { float* data = acti + blockIdx.x * NumofNeurons; for(int id = 0; id < NumofNeurons; id += blockDim.x) { int idx = id + threadIdx.x; if(idx < NumofNeurons) { float val = data[idx]; val = val + b[idx]; data[idx] = d_nonLinearity(val, NONLIN); } } } __global__ void g_FullConnectDropout(float * outputs, float * drop, int len) { for(int i = 0; i < len; i += blockDim.x * gridDim.x) { int id = i + blockIdx.x * blockDim.x + threadIdx.x; if(id < len) { outputs[id] = outputs[id] * drop[id]; } } } void FullConnect::feedforward() { //drop dim3 block = inputs->rows; dim3 thread = min(512, inputs->cols); //convert hipLaunchKernelGGL(( g_convert), dim3(block), dim3(thread), 0, 0, inputs->getDev(), inputs_format->getDev(), inputs->rows, inputs->cols, inputs->channels); checkCudaErrors(hipStreamSynchronize(0)); getLastCudaError("g_convert"); matrixMulTB(inputs_format, w, outputs); thread = min(512, outputs->cols); block = outputs->rows; hipLaunchKernelGGL(( g_FullConnectActi), dim3(block), dim3(thread), 0, 0, outputs->getDev(), b->getDev(), outputs->cols, NON_LINEARITY); checkCudaErrors(hipStreamSynchronize(0)); getLastCudaError("g_FullConnectActi"); if(dropRate > 0.0){ if(!Config::instance()->isTraining()){ dropScale(drop, dropRate); } else{ static int dropId = 0; if(dropId % 5 == 0){ dropOut(); if(dropId >= 5) dropId = 1; } dropId++; } thread = min(512, w->getLen()); block = min(512, (w->getLen() + thread.x - 1) / thread.x); hipLaunchKernelGGL(( g_FullConnectDropout), dim3(block), dim3(thread), 0, 0, outputs->getDev(), drop->getDev(), drop->getLen()); checkCudaErrors(hipStreamSynchronize(0)); getLastCudaError("g_FullConnectDropout"); }else{ } } void FullConnect::calCost() { cost->gpuClear(); if(fabs(lambda) >= 1e-10) { hipLaunchKernelGGL(( g_getCost_2), dim3(dim3(1)), dim3(dim3(256)), sizeof(float) * 256, 0, cost->getDev(), w->getDev(), lambda, w->getLen()); hipStreamSynchronize(0); getLastCudaError("g_getCost_2"); } } FullConnect::FullConnect(std::string name) { m_name = name; ConfigFC* config = 
(ConfigFC*)Config::instance()->getLayerByName(m_name); LayerBase * preLayer = (LayerBase*)Layers::instance()->get(config->m_input); inputs = preLayer->getOutputs(); if(inputs == NULL){ /*inputs = NULL the type must be BranchLayers*/ Assert(Config::instance()->getLayerByName(config->m_input)->isBranchLayer()); Assert(config->m_subInput != std::string("NULL")); BranchLayer* bl = static_cast<BranchLayer*>(preLayer); inputs = bl->getSubOutput(config->m_subInput); preDelta = bl->getSubCurDelta(config->m_subInput); }else{ preDelta = preLayer->getCurDelta(); } batch = Config::instance()->getBatchSize(); lambda = config->m_weightDecay; inputsize = inputs->cols * inputs->channels; outputsize = config->m_numFullConnectNeurons; dropRate = config->m_dropoutRate; NON_LINEARITY = config->m_nonLinearity; inputs_format = new cuMatrix<float>(inputs->rows, inputs->cols * inputs->channels, 1); outputs = new cuMatrix<float>(batch, outputsize, 1); curDelta = new cuMatrix<float>(batch, outputsize, 1); if(fabs(dropRate) < 0.0001) drop = NULL; else drop = new cuMatrix<float>(batch, outputsize, 1); this->setPreDelta(preDelta); w = new cuMatrix<float>(outputsize, inputsize, 1); wgrad = new cuMatrix<float>(outputsize, inputsize, 1); b = new cuMatrix<float>(outputsize, 1, 1); bgrad = new cuMatrix<float>(outputsize, 1, 1); momentum_w = new cuMatrix<float>(outputsize, inputsize, 1); momentum_b = new cuMatrix<float>(outputsize, 1, 1); this->initRandom(); Layers::instance()->set(m_name, this); } void FullConnect::dropOut() { dropDelta(drop, dropRate); } void FullConnect::backpropagation() { if(NON_LINEARITY >= 0){ hipLaunchKernelGGL(( g_dnonLinearity), dim3(dim3(256)), dim3(dim3(256)), 0, 0, curDelta->getDev(), outputs->getDev(), outputs->getLen(), NON_LINEARITY); hipStreamSynchronize(0); getLastCudaError("g_dnonLinearity"); } //preDelta matrixMul(curDelta, w, preDelta_format); dim3 block = batch; dim3 thread= min(512, preDelta->channels * preDelta->cols); hipLaunchKernelGGL(( g_preDeltaFormat), dim3(block), dim3(thread), 0, 0, preDelta_format->getDev(), preDelta->getDev(), preDelta->rows, preDelta->cols, preDelta->channels); hipStreamSynchronize(0); getLastCudaError("g_preDeltaFormat"); } void FullConnect::getGrad() { matrixMulTA(curDelta, inputs_format, wgrad); hipLaunchKernelGGL(( g_FullConnectWgrad), dim3(dim3(256)), dim3(dim3(256)), 0, 0, wgrad->getDev(), w->getDev(), wgrad->getLen(), lambda, batch); hipStreamSynchronize(0); getLastCudaError("g_FullConnectWgrad"); if(curDelta->rows > MAX_THREADS) { printf("getFullConnectDelta g_getBgrad > MAX_THREADS\n"); exit(0); } hipLaunchKernelGGL(( g_getBgrad), dim3(dim3(curDelta->cols)), dim3(dim3(curDelta->rows)), sizeof(float) * curDelta->rows, 0, curDelta->getDev(), bgrad->getDev(), batch); hipStreamSynchronize(0); } void FullConnect::updateWeight() { dim3 block = min((momentum_w->getLen() + 255) / 256, 5120); dim3 thread= 256; hipLaunchKernelGGL(( g_vecAdd), dim3(block), dim3(thread), 0, Layers::instance()->get_stream(), momentum_w->getDev(), wgrad->getDev(), w->getDev(), momentum_b->getDev(), bgrad->getDev(), b->getDev(), wgrad->getLen(), bgrad->getLen(), Config::instance()->getMomentum(), Config::instance()->getLrate(), Config::instance()->getLrate()); } void FullConnect::clearMomentum() { momentum_b->gpuClear(); momentum_w->gpuClear(); } cuMatrix<float>* FullConnect::getOutputs() { return outputs; } cuMatrix<float>* FullConnect::getCurDelta() { return curDelta; } void FullConnect::setPreDelta(cuMatrix<float>* _preDelta) { preDelta = _preDelta; preDelta_format = new 
cuMatrix<float>(preDelta->rows, preDelta->cols * preDelta->channels, 1); } void FullConnect::convert() { int threads = min(512, inputs->cols); hipLaunchKernelGGL(( g_convert), dim3(dim3(inputs->rows)), dim3(threads), 0, 0, inputs->getDev(), inputs_format->getDev(), inputs->rows, inputs->cols, inputs->channels); hipStreamSynchronize(0); getLastCudaError("convert"); } void FullConnect::initRandom() { //srand(clock()); float initW = Config::instance()->getLayerByName(m_name)->m_initW; //initMatrix(w, epsilon); if(Config::instance()->getLayerByName(m_name)->isGaussian()){ float epsilon = initW; for(int c = 0; c < w->channels; c++) { float r1 = 0.01f + 5.0f * (rand()) / RAND_MAX; float r2 = 0.01f + 5.0f * (rand()) / RAND_MAX; createGaussian(w->getHost() + c * w->getArea(), r1,r2, w->rows, w->cols, w->channels, epsilon); } w->toGpu(); } else{ //float epsilon = sqrt((float)6) / sqrt((float)(outputs->rows + outputs->cols)); for(int j = 0; j < w->getLen(); j++){ w->getHost()[j] = initW * (2.0f * rand() / RAND_MAX - 1.0f); //printf("%f ", w[i]->hostData[j]); }//printf("\n"); w->toGpu(); } //float epsilon = sqrt((float)6) / sqrt((float)(inputsize + outputsize)); w->toGpu(); } void FullConnect::initFromCheckpoint(FILE* file) { float val = 0.0; for(int c = 0; c < w->channels; c++){ for(int i = 0; i < w->rows; i++){ for(int j = 0; j < w->cols; j++){ if(fscanf(file, "%f", &val) == EOF){ LOG("scanf fail", "result/log.txt"); } w->set(i, j, c, val); } } } for(int c = 0; c < b->channels; c++){ for(int i = 0; i < b->rows; i++){ for(int j = 0; j < b->cols; j++){ if(fscanf(file, "%f", &val) == EOF){ LOG("scanf fail", "result/log.txt"); } b->set(i, j, c, val); } } } w->toGpu(); b->toGpu(); } void FullConnect::save(FILE* file) { w->toCpu(); b->toCpu(); for(int c = 0; c < w->channels; c++){ for(int i = 0; i < w->rows; i++){ for(int j = 0; j < w->cols; j++){ fprintf(file, "%f ", w->get(i,j,c)); } } } for(int c = 0; c < b->channels; c++){ for(int i = 0; i < b->rows; i++){ for(int j = 0; j < b->cols; j++){ fprintf(file, "%f ", b->get(i,j, c)); } } } }
d471e0336e73c152c7eac965eec692817575305b.cu
#include "FullConnect.h" #include "../common/cuBase.h" #include "../common/cuMatrix.h" #include "../common/Config.h" #include "../layers/BranchLayer.h" #include <math.h> __global__ void g_FullConnectDropout(float * w, float * dropW, float* afterDropW, int len); __global__ void g_FullConnectFeedforward(float* acti, float* b, int NumofNeurons, int NONLIN); __global__ void g_FullConnectActi(float* acti, float* b, int NumofNeurons, int NONLIN); __global__ void g_FullConnectWgrad(float* wgrad, float* w, /*float* dropM,*/ int len, float lambda, int batch); __global__ void g_FullConnectActi(float* acti, float* b, int NumofNeurons, int NONLIN) { float* data = acti + blockIdx.x * NumofNeurons; for(int id = 0; id < NumofNeurons; id += blockDim.x) { int idx = id + threadIdx.x; if(idx < NumofNeurons) { float val = data[idx]; val = val + b[idx]; data[idx] = d_nonLinearity(val, NONLIN); } } } __global__ void g_FullConnectWgrad(float* wgrad, float* w, int len, float lambda, int batch) { for(int i = 0; i < len; i += blockDim.x * gridDim.x) { int id = i + blockDim.x * blockIdx.x + threadIdx.x; if(id < len) { if(fabs(lambda) < 1e-10) wgrad[id] = wgrad[id] / batch /** dropM[id]*/; else wgrad[id] = (wgrad[id] / batch + lambda * w[id]) /** dropM[id]*/; } } } /* * blocks : cuFullConnectActi[hl]->rows; * threads : dim3(min(512, len)); */ __global__ void g_FullConnectFeedforward(float* acti, float* b, int NumofNeurons, int NONLIN) { float* data = acti + blockIdx.x * NumofNeurons; for(int id = 0; id < NumofNeurons; id += blockDim.x) { int idx = id + threadIdx.x; if(idx < NumofNeurons) { float val = data[idx]; val = val + b[idx]; data[idx] = d_nonLinearity(val, NONLIN); } } } __global__ void g_FullConnectDropout(float * outputs, float * drop, int len) { for(int i = 0; i < len; i += blockDim.x * gridDim.x) { int id = i + blockIdx.x * blockDim.x + threadIdx.x; if(id < len) { outputs[id] = outputs[id] * drop[id]; } } } void FullConnect::feedforward() { //drop dim3 block = inputs->rows; dim3 thread = min(512, inputs->cols); //convert g_convert<<<block, thread>>>( inputs->getDev(), inputs_format->getDev(), inputs->rows, inputs->cols, inputs->channels); checkCudaErrors(cudaStreamSynchronize(0)); getLastCudaError("g_convert"); matrixMulTB(inputs_format, w, outputs); thread = min(512, outputs->cols); block = outputs->rows; g_FullConnectActi<<<block, thread>>>(outputs->getDev(), b->getDev(), outputs->cols, NON_LINEARITY); checkCudaErrors(cudaStreamSynchronize(0)); getLastCudaError("g_FullConnectActi"); if(dropRate > 0.0){ if(!Config::instance()->isTraining()){ dropScale(drop, dropRate); } else{ static int dropId = 0; if(dropId % 5 == 0){ dropOut(); if(dropId >= 5) dropId = 1; } dropId++; } thread = min(512, w->getLen()); block = min(512, (w->getLen() + thread.x - 1) / thread.x); g_FullConnectDropout<<<block, thread>>>(outputs->getDev(), drop->getDev(), drop->getLen()); checkCudaErrors(cudaStreamSynchronize(0)); getLastCudaError("g_FullConnectDropout"); }else{ } } void FullConnect::calCost() { cost->gpuClear(); if(fabs(lambda) >= 1e-10) { g_getCost_2<<<dim3(1), dim3(256), sizeof(float) * 256>>>(cost->getDev(), w->getDev(), lambda, w->getLen()); cudaStreamSynchronize(0); getLastCudaError("g_getCost_2"); } } FullConnect::FullConnect(std::string name) { m_name = name; ConfigFC* config = (ConfigFC*)Config::instance()->getLayerByName(m_name); LayerBase * preLayer = (LayerBase*)Layers::instance()->get(config->m_input); inputs = preLayer->getOutputs(); if(inputs == NULL){ /*inputs = NULL the type must be BranchLayers*/ 
Assert(Config::instance()->getLayerByName(config->m_input)->isBranchLayer()); Assert(config->m_subInput != std::string("NULL")); BranchLayer* bl = static_cast<BranchLayer*>(preLayer); inputs = bl->getSubOutput(config->m_subInput); preDelta = bl->getSubCurDelta(config->m_subInput); }else{ preDelta = preLayer->getCurDelta(); } batch = Config::instance()->getBatchSize(); lambda = config->m_weightDecay; inputsize = inputs->cols * inputs->channels; outputsize = config->m_numFullConnectNeurons; dropRate = config->m_dropoutRate; NON_LINEARITY = config->m_nonLinearity; inputs_format = new cuMatrix<float>(inputs->rows, inputs->cols * inputs->channels, 1); outputs = new cuMatrix<float>(batch, outputsize, 1); curDelta = new cuMatrix<float>(batch, outputsize, 1); if(fabs(dropRate) < 0.0001) drop = NULL; else drop = new cuMatrix<float>(batch, outputsize, 1); this->setPreDelta(preDelta); w = new cuMatrix<float>(outputsize, inputsize, 1); wgrad = new cuMatrix<float>(outputsize, inputsize, 1); b = new cuMatrix<float>(outputsize, 1, 1); bgrad = new cuMatrix<float>(outputsize, 1, 1); momentum_w = new cuMatrix<float>(outputsize, inputsize, 1); momentum_b = new cuMatrix<float>(outputsize, 1, 1); this->initRandom(); Layers::instance()->set(m_name, this); } void FullConnect::dropOut() { dropDelta(drop, dropRate); } void FullConnect::backpropagation() { if(NON_LINEARITY >= 0){ g_dnonLinearity<<<dim3(256), dim3(256)>>>(curDelta->getDev(), outputs->getDev(), outputs->getLen(), NON_LINEARITY); cudaStreamSynchronize(0); getLastCudaError("g_dnonLinearity"); } //preDelta matrixMul(curDelta, w, preDelta_format); dim3 block = batch; dim3 thread= min(512, preDelta->channels * preDelta->cols); g_preDeltaFormat<<<block, thread>>>( preDelta_format->getDev(), preDelta->getDev(), preDelta->rows, preDelta->cols, preDelta->channels); cudaStreamSynchronize(0); getLastCudaError("g_preDeltaFormat"); } void FullConnect::getGrad() { matrixMulTA(curDelta, inputs_format, wgrad); g_FullConnectWgrad<<<dim3(256), dim3(256)>>>(wgrad->getDev(), w->getDev(), wgrad->getLen(), lambda, batch); cudaStreamSynchronize(0); getLastCudaError("g_FullConnectWgrad"); if(curDelta->rows > MAX_THREADS) { printf("getFullConnectDelta g_getBgrad > MAX_THREADS\n"); exit(0); } g_getBgrad<<<dim3(curDelta->cols), dim3(curDelta->rows), sizeof(float) * curDelta->rows>>> (curDelta->getDev(), bgrad->getDev(), batch); cudaStreamSynchronize(0); } void FullConnect::updateWeight() { dim3 block = min((momentum_w->getLen() + 255) / 256, 5120); dim3 thread= 256; g_vecAdd<<<block, thread, 0, Layers::instance()->get_stream()>>>(momentum_w->getDev(), wgrad->getDev(), w->getDev(), momentum_b->getDev(), bgrad->getDev(), b->getDev(), wgrad->getLen(), bgrad->getLen(), Config::instance()->getMomentum(), Config::instance()->getLrate(), Config::instance()->getLrate()); } void FullConnect::clearMomentum() { momentum_b->gpuClear(); momentum_w->gpuClear(); } cuMatrix<float>* FullConnect::getOutputs() { return outputs; } cuMatrix<float>* FullConnect::getCurDelta() { return curDelta; } void FullConnect::setPreDelta(cuMatrix<float>* _preDelta) { preDelta = _preDelta; preDelta_format = new cuMatrix<float>(preDelta->rows, preDelta->cols * preDelta->channels, 1); } void FullConnect::convert() { int threads = min(512, inputs->cols); g_convert<<<dim3(inputs->rows), threads>>> (inputs->getDev(), inputs_format->getDev(), inputs->rows, inputs->cols, inputs->channels); cudaStreamSynchronize(0); getLastCudaError("convert"); } void FullConnect::initRandom() { //srand(clock()); float initW = 
Config::instance()->getLayerByName(m_name)->m_initW; //initMatrix(w, epsilon); if(Config::instance()->getLayerByName(m_name)->isGaussian()){ float epsilon = initW; for(int c = 0; c < w->channels; c++) { float r1 = 0.01f + 5.0f * (rand()) / RAND_MAX; float r2 = 0.01f + 5.0f * (rand()) / RAND_MAX; createGaussian(w->getHost() + c * w->getArea(), r1,r2, w->rows, w->cols, w->channels, epsilon); } w->toGpu(); } else{ //float epsilon = sqrt((float)6) / sqrt((float)(outputs->rows + outputs->cols)); for(int j = 0; j < w->getLen(); j++){ w->getHost()[j] = initW * (2.0f * rand() / RAND_MAX - 1.0f); //printf("%f ", w[i]->hostData[j]); }//printf("\n"); w->toGpu(); } //float epsilon = sqrt((float)6) / sqrt((float)(inputsize + outputsize)); w->toGpu(); } void FullConnect::initFromCheckpoint(FILE* file) { float val = 0.0; for(int c = 0; c < w->channels; c++){ for(int i = 0; i < w->rows; i++){ for(int j = 0; j < w->cols; j++){ if(fscanf(file, "%f", &val) == EOF){ LOG("scanf fail", "result/log.txt"); } w->set(i, j, c, val); } } } for(int c = 0; c < b->channels; c++){ for(int i = 0; i < b->rows; i++){ for(int j = 0; j < b->cols; j++){ if(fscanf(file, "%f", &val) == EOF){ LOG("scanf fail", "result/log.txt"); } b->set(i, j, c, val); } } } w->toGpu(); b->toGpu(); } void FullConnect::save(FILE* file) { w->toCpu(); b->toCpu(); for(int c = 0; c < w->channels; c++){ for(int i = 0; i < w->rows; i++){ for(int j = 0; j < w->cols; j++){ fprintf(file, "%f ", w->get(i,j,c)); } } } for(int c = 0; c < b->channels; c++){ for(int i = 0; i < b->rows; i++){ for(int j = 0; j < b->cols; j++){ fprintf(file, "%f ", b->get(i,j, c)); } } } }
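For readability, the per-element rule implemented by g_FullConnectWgrad above can be restated on the host as the small helper below (a scalar illustration only): the accumulated gradient is averaged over the batch and, when the weight-decay coefficient lambda is non-negligible, an L2 term lambda * w is added.

#include <cmath>

float fc_wgrad(float wgrad_sum, float w, float lambda, int batch) {
    // mirrors the branch in g_FullConnectWgrad
    if (std::fabs(lambda) < 1e-10f)
        return wgrad_sum / batch;
    return wgrad_sum / batch + lambda * w;
}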
6b530d7cd1258e71340728e547d3a4f59ce43bcb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cstdint> #include <cstring> #include <iostream> #include <fstream> #include <string> #include <numeric> #include <sys/time.h> #include <cstdarg> #include <algorithm> #include <cassert> #include "utils.h" #define VLEN 5 // The decimal buffer length #define CAP 1000000000 // The capacity of one decimal buffer word #define CAP_POW 9 // The capacity power of one decimal buffer word // #define CAP 100 // #define CAP_POW 2 /* * decimal: representing a decimal number using VLEN int32_t; * sign: 0: positive, 1: negative * prec: the precision of the decimal; * frac: the fraction part of the decimal; * v[VLEN]: the values; v[0] is the least significant part; */ struct decimal { uint32_t sign:1; int32_t prec:8; int32_t frac:7; int32_t v[VLEN]; decimal(int32_t v, int f); decimal(std::string, int); decimal(); friend std::ostream& operator<<(std::ostream&, const decimal &); }; /* * initialize a decimal using int32_t * value: the initialization number, without decimal point * f: the decimal point position */ decimal::decimal(int32_t value, int f = 0) { sign = value < 0; frac = f; if(value < 0) value = -value; for(int i = 0; i < VLEN; i++){ v[i] = value % CAP; value /= CAP; } } decimal::decimal() { decimal(0); } /* * initialize a decimal using std::string * str: the initialzation number * pow: change the representation according to the power, e.g., ("1", -2) would be v[0]=100, frac = 2 */ decimal::decimal(std::string str, int pow = 0) { size_t pos; memset(v, 0, VLEN*sizeof(int32_t)); pos = str.find('.'); if(str[0] == '-'){ sign = 1; str.erase(str.begin()); }else sign = 0; if(pos == std::string::npos){ frac = 0; }else{ frac = -(str.size() - pos - 1); str.erase(str.begin() + pos); } if(pow < 0){ for(int i = 0; i < -pow; i++) str += "0"; frac += pow; } int i = 0; while(str.size() >= CAP_POW){ v[i++] = stoi(str.substr(str.size()-CAP_POW, CAP_POW)); str = str.substr(0, str.size() - CAP_POW); } v[i] = stoi(str); } std::ostream &operator<<(std::ostream& os, const decimal &d) { os << "frac: " << d.frac << ", "; os << (d.sign?"-":"+"); for(int i = VLEN-1; i >= 0; i--) os << d.v[i] << (i != 0 ? " " : ""); return os; } /* * Add two absolute decimals hold in two consecutive VLEN int32_t buffer * a: the first decimal * b: the second decimal * res: the result decimal. 
This could be a or b * overflow: indicate if overflow */ __host__ __device__ void abs_add(int32_t *a, int32_t *b, int32_t *res, int32_t &overflow) { overflow = 0; for(int i = 0; i < VLEN; i++){ res[i] = a[i] + b[i] + overflow; overflow = res[i] / CAP; res[i] = res[i] % CAP; } } /* * Compare two absolute decimals hold in two consecutive VLEN int32_t buffer * a: the first decimal * b: the second decimal * Return values: >0 (a > b), 0 (a == b), <0 (a < b) * */ __host__ __device__ int32_t abs_cmp(int32_t *a, int32_t *b) { int32_t res = 0; #pragma unroll for (int i = VLEN - 1; i >= 0 && res == 0; i--) { res = a[i] - b[i]; } return res; } /* * Substract an absolute decimal (b) from an absolute decimal (a) */ __host__ __device__ void abs_sub(int32_t *a, int32_t *b, int32_t *res) { int32_t *sub1, *sub2; int32_t r = abs_cmp(a, b); if(r >= 0){ sub1 = a; sub2 = b; }else{ sub1 = b; sub2 = a; } int32_t carry = 0; for(int i = 0; i < VLEN; i++){ res[i] = sub1[i] + CAP - sub2[i] - carry; carry = !(res[i] / CAP); res[i] = res[i] % CAP; } } /* * Add two decimals, the fraction length should be the same before adding */ __host__ __device__ void var_add(struct decimal &v1, struct decimal &v2, struct decimal &res) { int32_t overflow = 0; res.prec = v1.prec; res.frac = v1.frac; res.sign = v1.sign; if(v1.sign ^ v2.sign == 0){ abs_add(v1.v, v2.v, res.v, overflow); }else{ abs_sub(v1.v, v2.v, res.v); res.sign = (abs_cmp(v1.v, v2.v) > 0 && v1.sign) || (abs_cmp(v1.v, v2.v) < 0 && !v1.sign); } } __host__ __device__ void abs_lshift(int32_t *a, int len, int n, int32_t *res) { } /* * right shift an absolute decimal * a: the input decimal * len: the length of the input buffer * n: how many digits it shifts * res: the output decimal, could be same as the input buffer */ __host__ __device__ void abs_rshift(int32_t *a, int len, int n, int32_t *res) { int32_t rword = n / CAP_POW; int32_t rbit = n % CAP_POW; int32_t rd = 1; int32_t rl = 1; for(int i = 0; i < rbit; i++) rd *= 10; for(int i = 0; i < CAP_POW - rbit; i++) rl *= 10; for(int i = 0; i < len - rword - 1; i++){ res[i] = a[rword + i] / rd + a[rword + i + 1] % rd * rl; } res[len - rword - 1] = a[len - 1] / rd; for(int i = len - rword; i < len; i++) res[i] = 0; } /* * multiply two absolute decimals * a: the first decimal * b: the second decimal * res: the result decimal. 
The buffer size should be VLEN*2 */ __host__ __device__ void abs_mul(int32_t *a, int32_t *b, int32_t *res) { int64_t temp; int32_t carry; for(int i = 0; i < VLEN * 2; i++) res[i] = 0; for(int i = 0; i < VLEN; i++){ carry = 0; for(int j = 0; j < VLEN; j++){ temp = (int64_t)a[i] * b[j] + res[i+j] + carry; carry = temp / CAP; res[i+j] = temp % CAP; } res[i+VLEN] = carry; } } /* * multiply two decimals */ __host__ __device__ void var_mul(struct decimal &v1, struct decimal &v2, struct decimal &res) { int32_t overflow = 0; res.prec = v1.prec; res.frac = v1.frac + v2.frac; res.sign = v1.sign ^ v2.sign; int32_t inner_res[VLEN*2]; abs_mul(v1.v, v2.v, inner_res); //abs_rshift(inner_res, VLEN*2, res.prec - (v2.frac + v1.frac), inner_res); // or abs_lfshit for(int i = 0; i < VLEN; i++) res.v[i] = inner_res[i]; for(int i = VLEN; i < 2*VLEN; i++) overflow = (overflow || inner_res[i]); } /* * multiply two decimals, and set the fraction of the result to frac */ __host__ __device__ void var_mul(struct decimal &v1, struct decimal &v2, struct decimal &res, int frac) { int32_t overflow = 0; res.prec = v1.prec; res.frac = v1.frac + v2.frac; res.sign = v1.sign ^ v2.sign; int32_t inner_res[VLEN*2]; abs_mul(v1.v, v2.v, inner_res); if(res.frac < frac){ abs_rshift(inner_res, VLEN*2, frac - res.frac, inner_res); res.frac = frac; }else if(res.frac > frac){ abs_lshift(inner_res, VLEN*2, frac - res.frac, inner_res); res.frac = frac; } for(int i = 0; i < VLEN; i++) res.v[i] = inner_res[i]; for(int i = VLEN; i < 2*VLEN; i++) overflow = (overflow || inner_res[i]); } /* * accumulate decimals in the threadblock */ __global__ void accumulate(decimal *a, int n, decimal *res) { extern __shared__ decimal sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n) memcpy(sdata+tid, a+i, sizeof(decimal)); else memset(sdata+tid, 0, sizeof(decimal)); __syncthreads(); for(unsigned int s = blockDim.x/2; s > 0; s >>= 1){ if(tid < s){ var_add(sdata[tid], sdata[tid+s], sdata[tid]); } __syncthreads(); } if(tid == 0) memcpy(res+blockIdx.x, sdata, sizeof(decimal)); } /* * calculate l_extendedprice*(1-l_discount) */ __global__ void mul_discount(decimal *e, decimal *d, int n, decimal *one) { extern __shared__ decimal sdata[]; int tid = threadIdx.x; int i = blockDim.x * blockIdx.x + threadIdx.x; memset(sdata+tid, 0, sizeof(decimal)); if(tid == 0) memcpy(&sdata[blockDim.x], one, sizeof(decimal)); __syncthreads(); d[i].sign = 1; var_add(sdata[blockDim.x], d[i], sdata[tid]); var_mul(e[i], sdata[tid], sdata[tid]); memcpy(e+i, sdata+tid, sizeof(decimal)); } /* * calculate l_extendedprice*(1-l_discount)*(1+l_tax) */ __global__ void mul_discount_tax(decimal *e, decimal *d, decimal *t, int n, decimal *one) { extern __shared__ decimal sdata[]; int tid = threadIdx.x; int i = blockDim.x * blockIdx.x + threadIdx.x; memset(sdata+tid*2, 0, sizeof(decimal)*2); if(tid == 0) memcpy(&sdata[blockDim.x*2], one, sizeof(decimal)); __syncthreads(); decimal &tmpRes = sdata[tid*2]; decimal &tmpRes2 = sdata[tid*2+1]; d[i].sign = 1; var_add(sdata[blockDim.x*2], d[i], tmpRes); var_add(sdata[blockDim.x*2], t[i], tmpRes2); var_mul(e[i], tmpRes, tmpRes); var_mul(tmpRes, tmpRes2, tmpRes2); memcpy(e+i, sdata+tid*2+1, sizeof(decimal)); } int main(int argc, char *argv[]) { CPUTimer cpuTimer; double cpuPerf; GPUTimer gpuTimer; float gpuPerf; double cpuPerfTotal = 0.0; float gpuPerfTotal = 0.0; const char *datafile = "/data/tpch/data/scale_1/csv/org/lineitem.tbl"; //const char *datafile = "/data/tpch/tpch100/lineitem.tbl"; /* * 
l_quantity(5): decimal 36,0 * l_extendedprice(6): decimal 36,2 * l_discount(7): decimal 36,2 * l_tax(8): decimal 36,2 */ std::vector<std::string> q_str; std::vector<std::string> e_str; std::vector<std::string> d_str; std::vector<std::string> t_str; // load quantity, extendedprice, discount, and tax from lineitem cpuPerf = cpuTimer.timing( [&](){ readLines(datafile, (uint64_t)-1, [&](std::string l) { extractFields(l, {4, 5, 6, 7}, 0, q_str, e_str, d_str, t_str); }); }); printf("Read file complete! %lf ms\n", cpuPerf); printf(" l_quantity.size() is %lu, l_extendedprice.size() is %lu, l_discount.size() is %lu, l_tax.size() is %lu\n", q_str.size(), e_str.size(), d_str.size(), t_str.size()); decimal *q_cpu, *q_gpu; decimal *e_cpu, *e_gpu; decimal *d_cpu, *d_gpu; decimal *t_cpu, *t_gpu; // allocate memory on both GPU and CPU for holding decimals transformed from the string arrays auto allocate = [](std::vector<std::string> &strs, decimal **cpu, decimal **gpu) { size_t free, total; gpuErrchk( hipMemGetInfo(&free, &total) ); //printf("Device Memory: %lu/%lu MB\n", free / (1024 * 1024), total / (1024 * 1024)); size_t size = sizeof(decimal) * strs.size(); printf(" allocate %lf/%lf MB on CPU and GPU...\n", size / (1024 * 1024.0), free / (1024 * 1024.0)); if(size > free){ printf("Failed to allocate memory %lu (%lf MB), free: %lu\n", size, size / (1024 * 1024.0), free); exit(-1); } *cpu = (decimal *)malloc(sizeof(decimal) * strs.size()); gpuErrchk( hipMalloc((void **)gpu, sizeof(decimal) * strs.size()) ); for(int i = 0; i < strs.size(); i++) (*cpu)[i] = decimal(strs[i]); gpuErrchk( hipMemcpy(*gpu, *cpu, sizeof(decimal) * strs.size(), hipMemcpyHostToDevice) ); }; // cpuPerf = cpuTimer.timing( [&](){ // allocate(q_str, &q_cpu, &q_gpu); // allocate(e_str, &e_cpu, &e_gpu); // allocate(d_str, &d_cpu, &d_gpu); // allocate(t_str, &t_cpu, &t_gpu); // }); // printf("Load data complete! %lf ms\n", cpuPerf); decimal zero(0); decimal sum_cpu(0); size_t tupleNr = q_str.size(); auto setZeroCpu = [&](decimal &d) { memcpy(&d, &zero, sizeof(decimal)); }; assert(q_str.size() == e_str.size()); assert(e_str.size() == d_str.size()); assert(d_str.size() == t_str.size()); // thread number in a threadblock int threadNr = 256; size_t resNr = (tupleNr - 1) / threadNr + 1; decimal *sum_gpu; auto setZeroGpu = [&](decimal *d, size_t n) { for(int i = 0; i < n; i++) gpuErrchk( hipMemcpy(d + i, &zero, sizeof(decimal), hipMemcpyHostToDevice) ); }; decimal sum_res; // sum(l_quanlity) printf("sum(l_quanlity) tupleNr=%lu\n", tupleNr); cpuPerf = cpuTimer.timing( [&](){ allocate(q_str, &q_cpu, &q_gpu); }); printf(" Load data complete! 
%lf ms\n", cpuPerf); printf(" accumulation in decimal (CPU):"); setZeroCpu(sum_cpu); cpuPerf = cpuTimer.timing( [&](){ for(int i = 0; i < tupleNr; i++) var_add(q_cpu[i], sum_cpu, sum_cpu); }); std::cout << sum_cpu; printf(" %lf ms\n", cpuPerf); cpuPerfTotal += cpuPerf; printf(" accumulation in decimal (GPU):"); gpuErrchk( hipMalloc((void **)&sum_gpu, sizeof(decimal) * resNr) ); setZeroGpu(sum_gpu, resNr); gpuPerf = gpuTimer.timing( [&](){ size_t _tupleNr = tupleNr; size_t _resNr = resNr; decimal *_q_gpu = q_gpu; decimal *_sum_gpu = sum_gpu; while(_tupleNr > 1){ hipLaunchKernelGGL(( accumulate), dim3(_resNr), dim3(threadNr), sizeof(decimal)*threadNr, 0, _q_gpu, _tupleNr, _sum_gpu); decimal *tmp = _q_gpu; _q_gpu = _sum_gpu; _sum_gpu = tmp; _tupleNr = _resNr; _resNr = (_tupleNr - 1) / threadNr + 1; } gpuErrchk( hipMemcpy(&sum_res, _q_gpu, sizeof(decimal), hipMemcpyDeviceToHost) ); }); hipDeviceSynchronize(); std::cout << sum_res; printf(" %f ms\n", gpuPerf); gpuPerfTotal += gpuPerf; free( q_cpu ); gpuErrchk( hipFree(q_gpu) ); // sum(l_extendedprice) printf("sum(l_extendedprice) tupleNr=%lu\n", tupleNr); cpuPerf = cpuTimer.timing( [&](){ allocate(e_str, &e_cpu, &e_gpu); }); printf(" Load data complete! %lf ms\n", cpuPerf); printf(" accumulation in decimal (CPU):"); setZeroCpu(sum_cpu); cpuPerf = cpuTimer.timing( [&](){ for(int i = 0; i < tupleNr; i++) var_add(e_cpu[i], sum_cpu, sum_cpu); }); std::cout << sum_cpu; printf(" %lf ms\n", cpuPerf); cpuPerfTotal += cpuPerf; printf(" accumulation in decimal (GPU):"); setZeroGpu(sum_gpu, resNr); setZeroCpu(sum_res); gpuPerf = gpuTimer.timing( [&](){ size_t _tupleNr = tupleNr; size_t _resNr = resNr; decimal *_e_gpu = e_gpu; decimal *_sum_gpu = sum_gpu; while(_tupleNr > 1){ hipLaunchKernelGGL(( accumulate), dim3(_resNr), dim3(threadNr), sizeof(decimal)*threadNr, 0, _e_gpu, _tupleNr, _sum_gpu); decimal *tmp = _e_gpu; _e_gpu = _sum_gpu; _sum_gpu = tmp; _tupleNr = _resNr; _resNr = (_tupleNr - 1) / threadNr + 1; } gpuErrchk( hipMemcpy(&sum_res, _e_gpu, sizeof(decimal), hipMemcpyDeviceToHost) ); }); gpuErrchk( hipDeviceSynchronize() ); std::cout << sum_res; printf(" %f ms\n", gpuPerf); gpuPerfTotal += gpuPerf; free( e_cpu ); gpuErrchk( hipFree(e_gpu) ); #if 1 // sum(l_extendedprice*(1-l_discount)) printf("sum(l_extendedprice * (1 - l_discount)) tupleNr=%lu\n", tupleNr); cpuPerf = cpuTimer.timing( [&](){ allocate(e_str, &e_cpu, &e_gpu); allocate(d_str, &d_cpu, &d_gpu); }); printf(" Load data complete! 
%lf ms\n", cpuPerf); decimal one_cpu = decimal("1", -2); decimal tmpRes = decimal("0"); printf(" accumulation in decimal (CPU):"); setZeroCpu(sum_cpu); cpuPerf = cpuTimer.timing( [&](){ for(int i = 0; i < tupleNr; i++) { d_cpu[i].sign = 1; var_add(one_cpu, d_cpu[i], tmpRes); var_mul(e_cpu[i], tmpRes, tmpRes); var_add(tmpRes, sum_cpu, sum_cpu); } }); std::cout << sum_cpu; printf(" %lf ms\n", cpuPerf); cpuPerfTotal += cpuPerf; printf(" accumulation in decimal (GPU):"); setZeroGpu(sum_gpu, resNr); setZeroCpu(sum_res); decimal *one_gpu; gpuErrchk( hipMalloc((void **)&one_gpu, sizeof(decimal)) ); gpuErrchk( hipMemcpy(one_gpu, &one_cpu, sizeof(decimal), hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(e_gpu, e_cpu, sizeof(decimal) * tupleNr, hipMemcpyHostToDevice) ); gpuPerf = gpuTimer.timing( [&](){ size_t _tupleNr = tupleNr; size_t _resNr = resNr; decimal *_e_gpu = e_gpu; decimal *_d_gpu = d_gpu; decimal *_sum_gpu = sum_gpu; hipLaunchKernelGGL(( mul_discount), dim3(_resNr), dim3(threadNr), sizeof(decimal)*(threadNr + 1), 0, _e_gpu, _d_gpu, _tupleNr, one_gpu); while(_tupleNr > 1){ hipLaunchKernelGGL(( accumulate), dim3(_resNr), dim3(threadNr), sizeof(decimal)*threadNr, 0, _e_gpu, _tupleNr, _sum_gpu); decimal *tmp = _e_gpu; _e_gpu = _sum_gpu; _sum_gpu = tmp; _tupleNr = _resNr; _resNr = (_tupleNr - 1) / threadNr + 1; } gpuErrchk( hipMemcpy(&sum_res, _e_gpu, sizeof(decimal), hipMemcpyDeviceToHost) ); }); gpuErrchk( hipDeviceSynchronize() ); std::cout << sum_res; printf(" %f ms\n", gpuPerf); gpuPerfTotal += gpuPerf; free( e_cpu ); gpuErrchk( hipFree(e_gpu) ); free( d_cpu ); gpuErrchk( hipFree(d_gpu) ); // sum(l_extendedprice*(1-l_discount)*(1+l_tax)) printf("sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) tupleNr=%lu\n", tupleNr); cpuPerf = cpuTimer.timing( [&](){ allocate(e_str, &e_cpu, &e_gpu); allocate(d_str, &d_cpu, &d_gpu); allocate(t_str, &t_cpu, &t_gpu); }); printf(" Load data complete! 
%lf ms\n", cpuPerf); tmpRes = decimal("0"); decimal tmpRes2 = decimal("0"); printf(" accumulation in decimal (CPU):"); setZeroCpu(sum_cpu); cpuPerf = cpuTimer.timing( [&](){ for(int i = 0; i < tupleNr; i++) { d_cpu[i].sign = 1; var_add(one_cpu, d_cpu[i], tmpRes); var_add(one_cpu, t_cpu[i], tmpRes2); var_mul(e_cpu[i], tmpRes, tmpRes); var_mul(tmpRes, tmpRes2, tmpRes2); var_add(tmpRes2, sum_cpu, sum_cpu); } }); std::cout << sum_cpu; printf(" %lf ms\n", cpuPerf); cpuPerfTotal += cpuPerf; printf(" accumulation in decimal (GPU):"); setZeroGpu(sum_gpu, resNr); setZeroCpu(sum_res); gpuErrchk( hipMemcpy(e_gpu, e_cpu, sizeof(decimal) * tupleNr, hipMemcpyHostToDevice) ); gpuPerf = gpuTimer.timing( [&](){ size_t _tupleNr = tupleNr; size_t _resNr = resNr; decimal *_e_gpu = e_gpu; decimal *_d_gpu = d_gpu; decimal *_t_gpu = t_gpu; decimal *_sum_gpu = sum_gpu; hipLaunchKernelGGL(( mul_discount_tax), dim3(_resNr), dim3(threadNr), sizeof(decimal)*(2*threadNr + 1), 0, _e_gpu, _d_gpu, _t_gpu, _tupleNr, one_gpu); while(_tupleNr > 1){ hipLaunchKernelGGL(( accumulate), dim3(_resNr), dim3(threadNr), sizeof(decimal)*threadNr, 0, _e_gpu, _tupleNr, _sum_gpu); decimal *tmp = _e_gpu; _e_gpu = _sum_gpu; _sum_gpu = tmp; _tupleNr = _resNr; _resNr = (_tupleNr - 1) / threadNr + 1; } gpuErrchk( hipMemcpy(&sum_res, _e_gpu, sizeof(decimal), hipMemcpyDeviceToHost) ); }); gpuErrchk( hipDeviceSynchronize() ); std::cout << sum_res; printf(" %f ms\n", gpuPerf); gpuPerfTotal += gpuPerf; free( e_cpu ); gpuErrchk( hipFree(e_gpu) ); free( d_cpu ); gpuErrchk( hipFree(d_gpu) ); free( t_cpu ); gpuErrchk( hipFree(t_gpu) ); #endif // avg(l_discount) printf("avg(l_discount) tupleNr=%lu\n", tupleNr); cpuPerf = cpuTimer.timing( [&](){ allocate(d_str, &d_cpu, &d_gpu); }); printf(" Load data complete! %lf ms\n", cpuPerf); printf(" accumulation in decimal (CPU):"); setZeroCpu(sum_cpu); cpuPerf = cpuTimer.timing( [&](){ for(int i = 0; i < tupleNr; i++) var_add(d_cpu[i], sum_cpu, sum_cpu); }); std::cout << sum_cpu; printf(" %lf ms\n", cpuPerf); cpuPerfTotal += cpuPerf; printf(" accumulation in decimal (GPU):"); setZeroGpu(sum_gpu, resNr); setZeroCpu(sum_res); gpuPerf = gpuTimer.timing( [&](){ size_t _tupleNr = tupleNr; size_t _resNr = resNr; decimal *_d_gpu = d_gpu; decimal *_sum_gpu = sum_gpu; while(_tupleNr > 1){ hipLaunchKernelGGL(( accumulate), dim3(_resNr), dim3(threadNr), sizeof(decimal)*threadNr, 0, _d_gpu, _tupleNr, _sum_gpu); decimal *tmp = _d_gpu; _d_gpu = _sum_gpu; _sum_gpu = tmp; _tupleNr = _resNr; _resNr = (_tupleNr - 1) / threadNr + 1; } gpuErrchk( hipMemcpy(&sum_res, _d_gpu, sizeof(decimal), hipMemcpyDeviceToHost) ); }); gpuErrchk( hipDeviceSynchronize() ); std::cout << sum_res; printf(" %f ms\n", gpuPerf); gpuPerfTotal += gpuPerf; free( d_cpu ); gpuErrchk( hipFree(d_gpu) ); printf("Time on CPU: %lf ms\n", cpuPerfTotal); printf("Time on GPU: %f ms\n", gpuPerfTotal); return 0; }
6b530d7cd1258e71340728e547d3a4f59ce43bcb.cu
#include <cstdio> #include <cstdint> #include <cstring> #include <iostream> #include <fstream> #include <string> #include <numeric> #include <sys/time.h> #include <cstdarg> #include <algorithm> #include <cassert> #include "utils.h" #define VLEN 5 // The decimal buffer length #define CAP 1000000000 // The capacity of one decimal buffer word #define CAP_POW 9 // The capacity power of one decimal buffer word // #define CAP 100 // #define CAP_POW 2 /* * decimal: representing a decimal number using VLEN int32_t; * sign: 0: positive, 1: negative * prec: the precision of the decimal; * frac: the fraction part of the decimal; * v[VLEN]: the values; v[0] is the least significant part; */ struct decimal { uint32_t sign:1; int32_t prec:8; int32_t frac:7; int32_t v[VLEN]; decimal(int32_t v, int f); decimal(std::string, int); decimal(); friend std::ostream& operator<<(std::ostream&, const decimal &); }; /* * initialize a decimal using int32_t * value: the initialization number, without decimal point * f: the decimal point position */ decimal::decimal(int32_t value, int f = 0) { sign = value < 0; frac = f; if(value < 0) value = -value; for(int i = 0; i < VLEN; i++){ v[i] = value % CAP; value /= CAP; } } decimal::decimal() { decimal(0); } /* * initialize a decimal using std::string * str: the initialzation number * pow: change the representation according to the power, e.g., ("1", -2) would be v[0]=100, frac = 2 */ decimal::decimal(std::string str, int pow = 0) { size_t pos; memset(v, 0, VLEN*sizeof(int32_t)); pos = str.find('.'); if(str[0] == '-'){ sign = 1; str.erase(str.begin()); }else sign = 0; if(pos == std::string::npos){ frac = 0; }else{ frac = -(str.size() - pos - 1); str.erase(str.begin() + pos); } if(pow < 0){ for(int i = 0; i < -pow; i++) str += "0"; frac += pow; } int i = 0; while(str.size() >= CAP_POW){ v[i++] = stoi(str.substr(str.size()-CAP_POW, CAP_POW)); str = str.substr(0, str.size() - CAP_POW); } v[i] = stoi(str); } std::ostream &operator<<(std::ostream& os, const decimal &d) { os << "frac: " << d.frac << ", "; os << (d.sign?"-":"+"); for(int i = VLEN-1; i >= 0; i--) os << d.v[i] << (i != 0 ? " " : ""); return os; } /* * Add two absolute decimals hold in two consecutive VLEN int32_t buffer * a: the first decimal * b: the second decimal * res: the result decimal. 
This could be a or b * overflow: indicate if overflow */ __host__ __device__ void abs_add(int32_t *a, int32_t *b, int32_t *res, int32_t &overflow) { overflow = 0; for(int i = 0; i < VLEN; i++){ res[i] = a[i] + b[i] + overflow; overflow = res[i] / CAP; res[i] = res[i] % CAP; } } /* * Compare two absolute decimals hold in two consecutive VLEN int32_t buffer * a: the first decimal * b: the second decimal * Return values: >0 (a > b), 0 (a == b), <0 (a < b) * */ __host__ __device__ int32_t abs_cmp(int32_t *a, int32_t *b) { int32_t res = 0; #pragma unroll for (int i = VLEN - 1; i >= 0 && res == 0; i--) { res = a[i] - b[i]; } return res; } /* * Substract an absolute decimal (b) from an absolute decimal (a) */ __host__ __device__ void abs_sub(int32_t *a, int32_t *b, int32_t *res) { int32_t *sub1, *sub2; int32_t r = abs_cmp(a, b); if(r >= 0){ sub1 = a; sub2 = b; }else{ sub1 = b; sub2 = a; } int32_t carry = 0; for(int i = 0; i < VLEN; i++){ res[i] = sub1[i] + CAP - sub2[i] - carry; carry = !(res[i] / CAP); res[i] = res[i] % CAP; } } /* * Add two decimals, the fraction length should be the same before adding */ __host__ __device__ void var_add(struct decimal &v1, struct decimal &v2, struct decimal &res) { int32_t overflow = 0; res.prec = v1.prec; res.frac = v1.frac; res.sign = v1.sign; if(v1.sign ^ v2.sign == 0){ abs_add(v1.v, v2.v, res.v, overflow); }else{ abs_sub(v1.v, v2.v, res.v); res.sign = (abs_cmp(v1.v, v2.v) > 0 && v1.sign) || (abs_cmp(v1.v, v2.v) < 0 && !v1.sign); } } __host__ __device__ void abs_lshift(int32_t *a, int len, int n, int32_t *res) { } /* * right shift an absolute decimal * a: the input decimal * len: the length of the input buffer * n: how many digits it shifts * res: the output decimal, could be same as the input buffer */ __host__ __device__ void abs_rshift(int32_t *a, int len, int n, int32_t *res) { int32_t rword = n / CAP_POW; int32_t rbit = n % CAP_POW; int32_t rd = 1; int32_t rl = 1; for(int i = 0; i < rbit; i++) rd *= 10; for(int i = 0; i < CAP_POW - rbit; i++) rl *= 10; for(int i = 0; i < len - rword - 1; i++){ res[i] = a[rword + i] / rd + a[rword + i + 1] % rd * rl; } res[len - rword - 1] = a[len - 1] / rd; for(int i = len - rword; i < len; i++) res[i] = 0; } /* * multiply two absolute decimals * a: the first decimal * b: the second decimal * res: the result decimal. 
The buffer size should be VLEN*2 */ __host__ __device__ void abs_mul(int32_t *a, int32_t *b, int32_t *res) { int64_t temp; int32_t carry; for(int i = 0; i < VLEN * 2; i++) res[i] = 0; for(int i = 0; i < VLEN; i++){ carry = 0; for(int j = 0; j < VLEN; j++){ temp = (int64_t)a[i] * b[j] + res[i+j] + carry; carry = temp / CAP; res[i+j] = temp % CAP; } res[i+VLEN] = carry; } } /* * multiply two decimals */ __host__ __device__ void var_mul(struct decimal &v1, struct decimal &v2, struct decimal &res) { int32_t overflow = 0; res.prec = v1.prec; res.frac = v1.frac + v2.frac; res.sign = v1.sign ^ v2.sign; int32_t inner_res[VLEN*2]; abs_mul(v1.v, v2.v, inner_res); //abs_rshift(inner_res, VLEN*2, res.prec - (v2.frac + v1.frac), inner_res); // or abs_lfshit for(int i = 0; i < VLEN; i++) res.v[i] = inner_res[i]; for(int i = VLEN; i < 2*VLEN; i++) overflow = (overflow || inner_res[i]); } /* * multiply two decimals, and set the fraction of the result to frac */ __host__ __device__ void var_mul(struct decimal &v1, struct decimal &v2, struct decimal &res, int frac) { int32_t overflow = 0; res.prec = v1.prec; res.frac = v1.frac + v2.frac; res.sign = v1.sign ^ v2.sign; int32_t inner_res[VLEN*2]; abs_mul(v1.v, v2.v, inner_res); if(res.frac < frac){ abs_rshift(inner_res, VLEN*2, frac - res.frac, inner_res); res.frac = frac; }else if(res.frac > frac){ abs_lshift(inner_res, VLEN*2, frac - res.frac, inner_res); res.frac = frac; } for(int i = 0; i < VLEN; i++) res.v[i] = inner_res[i]; for(int i = VLEN; i < 2*VLEN; i++) overflow = (overflow || inner_res[i]); } /* * accumulate decimals in the threadblock */ __global__ void accumulate(decimal *a, int n, decimal *res) { extern __shared__ decimal sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n) memcpy(sdata+tid, a+i, sizeof(decimal)); else memset(sdata+tid, 0, sizeof(decimal)); __syncthreads(); for(unsigned int s = blockDim.x/2; s > 0; s >>= 1){ if(tid < s){ var_add(sdata[tid], sdata[tid+s], sdata[tid]); } __syncthreads(); } if(tid == 0) memcpy(res+blockIdx.x, sdata, sizeof(decimal)); } /* * calculate l_extendedprice*(1-l_discount) */ __global__ void mul_discount(decimal *e, decimal *d, int n, decimal *one) { extern __shared__ decimal sdata[]; int tid = threadIdx.x; int i = blockDim.x * blockIdx.x + threadIdx.x; memset(sdata+tid, 0, sizeof(decimal)); if(tid == 0) memcpy(&sdata[blockDim.x], one, sizeof(decimal)); __syncthreads(); d[i].sign = 1; var_add(sdata[blockDim.x], d[i], sdata[tid]); var_mul(e[i], sdata[tid], sdata[tid]); memcpy(e+i, sdata+tid, sizeof(decimal)); } /* * calculate l_extendedprice*(1-l_discount)*(1+l_tax) */ __global__ void mul_discount_tax(decimal *e, decimal *d, decimal *t, int n, decimal *one) { extern __shared__ decimal sdata[]; int tid = threadIdx.x; int i = blockDim.x * blockIdx.x + threadIdx.x; memset(sdata+tid*2, 0, sizeof(decimal)*2); if(tid == 0) memcpy(&sdata[blockDim.x*2], one, sizeof(decimal)); __syncthreads(); decimal &tmpRes = sdata[tid*2]; decimal &tmpRes2 = sdata[tid*2+1]; d[i].sign = 1; var_add(sdata[blockDim.x*2], d[i], tmpRes); var_add(sdata[blockDim.x*2], t[i], tmpRes2); var_mul(e[i], tmpRes, tmpRes); var_mul(tmpRes, tmpRes2, tmpRes2); memcpy(e+i, sdata+tid*2+1, sizeof(decimal)); } int main(int argc, char *argv[]) { CPUTimer cpuTimer; double cpuPerf; GPUTimer gpuTimer; float gpuPerf; double cpuPerfTotal = 0.0; float gpuPerfTotal = 0.0; const char *datafile = "/data/tpch/data/scale_1/csv/org/lineitem.tbl"; //const char *datafile = "/data/tpch/tpch100/lineitem.tbl"; /* * 
l_quantity(5): decimal 36,0 * l_extendedprice(6): decimal 36,2 * l_discount(7): decimal 36,2 * l_tax(8): decimal 36,2 */ std::vector<std::string> q_str; std::vector<std::string> e_str; std::vector<std::string> d_str; std::vector<std::string> t_str; // load quantity, extendedprice, discount, and tax from lineitem cpuPerf = cpuTimer.timing( [&](){ readLines(datafile, (uint64_t)-1, [&](std::string l) { extractFields(l, {4, 5, 6, 7}, 0, q_str, e_str, d_str, t_str); }); }); printf("Read file complete! %lf ms\n", cpuPerf); printf(" l_quantity.size() is %lu, l_extendedprice.size() is %lu, l_discount.size() is %lu, l_tax.size() is %lu\n", q_str.size(), e_str.size(), d_str.size(), t_str.size()); decimal *q_cpu, *q_gpu; decimal *e_cpu, *e_gpu; decimal *d_cpu, *d_gpu; decimal *t_cpu, *t_gpu; // allocate memory on both GPU and CPU for holding decimals transformed from the string arrays auto allocate = [](std::vector<std::string> &strs, decimal **cpu, decimal **gpu) { size_t free, total; gpuErrchk( cudaMemGetInfo(&free, &total) ); //printf("Device Memory: %lu/%lu MB\n", free / (1024 * 1024), total / (1024 * 1024)); size_t size = sizeof(decimal) * strs.size(); printf(" allocate %lf/%lf MB on CPU and GPU...\n", size / (1024 * 1024.0), free / (1024 * 1024.0)); if(size > free){ printf("Failed to allocate memory %lu (%lf MB), free: %lu\n", size, size / (1024 * 1024.0), free); exit(-1); } *cpu = (decimal *)malloc(sizeof(decimal) * strs.size()); gpuErrchk( cudaMalloc((void **)gpu, sizeof(decimal) * strs.size()) ); for(int i = 0; i < strs.size(); i++) (*cpu)[i] = decimal(strs[i]); gpuErrchk( cudaMemcpy(*gpu, *cpu, sizeof(decimal) * strs.size(), cudaMemcpyHostToDevice) ); }; // cpuPerf = cpuTimer.timing( [&](){ // allocate(q_str, &q_cpu, &q_gpu); // allocate(e_str, &e_cpu, &e_gpu); // allocate(d_str, &d_cpu, &d_gpu); // allocate(t_str, &t_cpu, &t_gpu); // }); // printf("Load data complete! %lf ms\n", cpuPerf); decimal zero(0); decimal sum_cpu(0); size_t tupleNr = q_str.size(); auto setZeroCpu = [&](decimal &d) { memcpy(&d, &zero, sizeof(decimal)); }; assert(q_str.size() == e_str.size()); assert(e_str.size() == d_str.size()); assert(d_str.size() == t_str.size()); // thread number in a threadblock int threadNr = 256; size_t resNr = (tupleNr - 1) / threadNr + 1; decimal *sum_gpu; auto setZeroGpu = [&](decimal *d, size_t n) { for(int i = 0; i < n; i++) gpuErrchk( cudaMemcpy(d + i, &zero, sizeof(decimal), cudaMemcpyHostToDevice) ); }; decimal sum_res; // sum(l_quanlity) printf("sum(l_quanlity) tupleNr=%lu\n", tupleNr); cpuPerf = cpuTimer.timing( [&](){ allocate(q_str, &q_cpu, &q_gpu); }); printf(" Load data complete! 
%lf ms\n", cpuPerf); printf(" accumulation in decimal (CPU):"); setZeroCpu(sum_cpu); cpuPerf = cpuTimer.timing( [&](){ for(int i = 0; i < tupleNr; i++) var_add(q_cpu[i], sum_cpu, sum_cpu); }); std::cout << sum_cpu; printf(" %lf ms\n", cpuPerf); cpuPerfTotal += cpuPerf; printf(" accumulation in decimal (GPU):"); gpuErrchk( cudaMalloc((void **)&sum_gpu, sizeof(decimal) * resNr) ); setZeroGpu(sum_gpu, resNr); gpuPerf = gpuTimer.timing( [&](){ size_t _tupleNr = tupleNr; size_t _resNr = resNr; decimal *_q_gpu = q_gpu; decimal *_sum_gpu = sum_gpu; while(_tupleNr > 1){ accumulate<<<_resNr, threadNr, sizeof(decimal)*threadNr>>>(_q_gpu, _tupleNr, _sum_gpu); decimal *tmp = _q_gpu; _q_gpu = _sum_gpu; _sum_gpu = tmp; _tupleNr = _resNr; _resNr = (_tupleNr - 1) / threadNr + 1; } gpuErrchk( cudaMemcpy(&sum_res, _q_gpu, sizeof(decimal), cudaMemcpyDeviceToHost) ); }); cudaDeviceSynchronize(); std::cout << sum_res; printf(" %f ms\n", gpuPerf); gpuPerfTotal += gpuPerf; free( q_cpu ); gpuErrchk( cudaFree(q_gpu) ); // sum(l_extendedprice) printf("sum(l_extendedprice) tupleNr=%lu\n", tupleNr); cpuPerf = cpuTimer.timing( [&](){ allocate(e_str, &e_cpu, &e_gpu); }); printf(" Load data complete! %lf ms\n", cpuPerf); printf(" accumulation in decimal (CPU):"); setZeroCpu(sum_cpu); cpuPerf = cpuTimer.timing( [&](){ for(int i = 0; i < tupleNr; i++) var_add(e_cpu[i], sum_cpu, sum_cpu); }); std::cout << sum_cpu; printf(" %lf ms\n", cpuPerf); cpuPerfTotal += cpuPerf; printf(" accumulation in decimal (GPU):"); setZeroGpu(sum_gpu, resNr); setZeroCpu(sum_res); gpuPerf = gpuTimer.timing( [&](){ size_t _tupleNr = tupleNr; size_t _resNr = resNr; decimal *_e_gpu = e_gpu; decimal *_sum_gpu = sum_gpu; while(_tupleNr > 1){ accumulate<<<_resNr, threadNr, sizeof(decimal)*threadNr>>>(_e_gpu, _tupleNr, _sum_gpu); decimal *tmp = _e_gpu; _e_gpu = _sum_gpu; _sum_gpu = tmp; _tupleNr = _resNr; _resNr = (_tupleNr - 1) / threadNr + 1; } gpuErrchk( cudaMemcpy(&sum_res, _e_gpu, sizeof(decimal), cudaMemcpyDeviceToHost) ); }); gpuErrchk( cudaDeviceSynchronize() ); std::cout << sum_res; printf(" %f ms\n", gpuPerf); gpuPerfTotal += gpuPerf; free( e_cpu ); gpuErrchk( cudaFree(e_gpu) ); #if 1 // sum(l_extendedprice*(1-l_discount)) printf("sum(l_extendedprice * (1 - l_discount)) tupleNr=%lu\n", tupleNr); cpuPerf = cpuTimer.timing( [&](){ allocate(e_str, &e_cpu, &e_gpu); allocate(d_str, &d_cpu, &d_gpu); }); printf(" Load data complete! 
%lf ms\n", cpuPerf); decimal one_cpu = decimal("1", -2); decimal tmpRes = decimal("0"); printf(" accumulation in decimal (CPU):"); setZeroCpu(sum_cpu); cpuPerf = cpuTimer.timing( [&](){ for(int i = 0; i < tupleNr; i++) { d_cpu[i].sign = 1; var_add(one_cpu, d_cpu[i], tmpRes); var_mul(e_cpu[i], tmpRes, tmpRes); var_add(tmpRes, sum_cpu, sum_cpu); } }); std::cout << sum_cpu; printf(" %lf ms\n", cpuPerf); cpuPerfTotal += cpuPerf; printf(" accumulation in decimal (GPU):"); setZeroGpu(sum_gpu, resNr); setZeroCpu(sum_res); decimal *one_gpu; gpuErrchk( cudaMalloc((void **)&one_gpu, sizeof(decimal)) ); gpuErrchk( cudaMemcpy(one_gpu, &one_cpu, sizeof(decimal), cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(e_gpu, e_cpu, sizeof(decimal) * tupleNr, cudaMemcpyHostToDevice) ); gpuPerf = gpuTimer.timing( [&](){ size_t _tupleNr = tupleNr; size_t _resNr = resNr; decimal *_e_gpu = e_gpu; decimal *_d_gpu = d_gpu; decimal *_sum_gpu = sum_gpu; mul_discount<<<_resNr, threadNr, sizeof(decimal)*(threadNr + 1)>>>(_e_gpu, _d_gpu, _tupleNr, one_gpu); while(_tupleNr > 1){ accumulate<<<_resNr, threadNr, sizeof(decimal)*threadNr>>>(_e_gpu, _tupleNr, _sum_gpu); decimal *tmp = _e_gpu; _e_gpu = _sum_gpu; _sum_gpu = tmp; _tupleNr = _resNr; _resNr = (_tupleNr - 1) / threadNr + 1; } gpuErrchk( cudaMemcpy(&sum_res, _e_gpu, sizeof(decimal), cudaMemcpyDeviceToHost) ); }); gpuErrchk( cudaDeviceSynchronize() ); std::cout << sum_res; printf(" %f ms\n", gpuPerf); gpuPerfTotal += gpuPerf; free( e_cpu ); gpuErrchk( cudaFree(e_gpu) ); free( d_cpu ); gpuErrchk( cudaFree(d_gpu) ); // sum(l_extendedprice*(1-l_discount)*(1+l_tax)) printf("sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) tupleNr=%lu\n", tupleNr); cpuPerf = cpuTimer.timing( [&](){ allocate(e_str, &e_cpu, &e_gpu); allocate(d_str, &d_cpu, &d_gpu); allocate(t_str, &t_cpu, &t_gpu); }); printf(" Load data complete! 
%lf ms\n", cpuPerf); tmpRes = decimal("0"); decimal tmpRes2 = decimal("0"); printf(" accumulation in decimal (CPU):"); setZeroCpu(sum_cpu); cpuPerf = cpuTimer.timing( [&](){ for(int i = 0; i < tupleNr; i++) { d_cpu[i].sign = 1; var_add(one_cpu, d_cpu[i], tmpRes); var_add(one_cpu, t_cpu[i], tmpRes2); var_mul(e_cpu[i], tmpRes, tmpRes); var_mul(tmpRes, tmpRes2, tmpRes2); var_add(tmpRes2, sum_cpu, sum_cpu); } }); std::cout << sum_cpu; printf(" %lf ms\n", cpuPerf); cpuPerfTotal += cpuPerf; printf(" accumulation in decimal (GPU):"); setZeroGpu(sum_gpu, resNr); setZeroCpu(sum_res); gpuErrchk( cudaMemcpy(e_gpu, e_cpu, sizeof(decimal) * tupleNr, cudaMemcpyHostToDevice) ); gpuPerf = gpuTimer.timing( [&](){ size_t _tupleNr = tupleNr; size_t _resNr = resNr; decimal *_e_gpu = e_gpu; decimal *_d_gpu = d_gpu; decimal *_t_gpu = t_gpu; decimal *_sum_gpu = sum_gpu; mul_discount_tax<<<_resNr, threadNr, sizeof(decimal)*(2*threadNr + 1)>>>(_e_gpu, _d_gpu, _t_gpu, _tupleNr, one_gpu); while(_tupleNr > 1){ accumulate<<<_resNr, threadNr, sizeof(decimal)*threadNr>>>(_e_gpu, _tupleNr, _sum_gpu); decimal *tmp = _e_gpu; _e_gpu = _sum_gpu; _sum_gpu = tmp; _tupleNr = _resNr; _resNr = (_tupleNr - 1) / threadNr + 1; } gpuErrchk( cudaMemcpy(&sum_res, _e_gpu, sizeof(decimal), cudaMemcpyDeviceToHost) ); }); gpuErrchk( cudaDeviceSynchronize() ); std::cout << sum_res; printf(" %f ms\n", gpuPerf); gpuPerfTotal += gpuPerf; free( e_cpu ); gpuErrchk( cudaFree(e_gpu) ); free( d_cpu ); gpuErrchk( cudaFree(d_gpu) ); free( t_cpu ); gpuErrchk( cudaFree(t_gpu) ); #endif // avg(l_discount) printf("avg(l_discount) tupleNr=%lu\n", tupleNr); cpuPerf = cpuTimer.timing( [&](){ allocate(d_str, &d_cpu, &d_gpu); }); printf(" Load data complete! %lf ms\n", cpuPerf); printf(" accumulation in decimal (CPU):"); setZeroCpu(sum_cpu); cpuPerf = cpuTimer.timing( [&](){ for(int i = 0; i < tupleNr; i++) var_add(d_cpu[i], sum_cpu, sum_cpu); }); std::cout << sum_cpu; printf(" %lf ms\n", cpuPerf); cpuPerfTotal += cpuPerf; printf(" accumulation in decimal (GPU):"); setZeroGpu(sum_gpu, resNr); setZeroCpu(sum_res); gpuPerf = gpuTimer.timing( [&](){ size_t _tupleNr = tupleNr; size_t _resNr = resNr; decimal *_d_gpu = d_gpu; decimal *_sum_gpu = sum_gpu; while(_tupleNr > 1){ accumulate<<<_resNr, threadNr, sizeof(decimal)*threadNr>>>(_d_gpu, _tupleNr, _sum_gpu); decimal *tmp = _d_gpu; _d_gpu = _sum_gpu; _sum_gpu = tmp; _tupleNr = _resNr; _resNr = (_tupleNr - 1) / threadNr + 1; } gpuErrchk( cudaMemcpy(&sum_res, _d_gpu, sizeof(decimal), cudaMemcpyDeviceToHost) ); }); gpuErrchk( cudaDeviceSynchronize() ); std::cout << sum_res; printf(" %f ms\n", gpuPerf); gpuPerfTotal += gpuPerf; free( d_cpu ); gpuErrchk( cudaFree(d_gpu) ); printf("Time on CPU: %lf ms\n", cpuPerfTotal); printf("Time on GPU: %f ms\n", gpuPerfTotal); return 0; }
033001c92025c06a136f8c26af27a6b9584ec3bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * (C) Copyright 2020, 2021 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "cuda_math_util.h" #include "cuda_util.h" #include "utility_functions.h" #include <chrono> #include <iostream> #include <memory> #define IDX2F(i, j, ld) ((((j)-1) * (ld)) + ((i)-1)) // this should be not necesary, because device id is set individually // per thread. However, if one would want to use 2 GPUs within one // thread, one needs it. #define RPU_EXPLICIT_ENFORCE_DEVICE_ID namespace RPU { __global__ void kernelCurandSetup(unsigned long long rseed, hiprandState_t *state, int n) { int id = threadIdx.x + blockIdx.x * blockDim.x; /* Each thread gets same seed, a different sequence number, no offset */ if (id < n) { hiprand_init(rseed, id, 0, &state[id]); } } __global__ void kernelCurandSetupSameSeed(unsigned long long rseed, hiprandState_t *state, int n) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < n) { hiprand_init(rseed, 0, 0, &state[id]); } } void curandSetup(CudaArray<hiprandState_t> &dev_states, unsigned long long rseed, bool same_seed) { unsigned long long seed = rseed; if (rseed == 0) { seed = (unsigned long long)std::chrono::high_resolution_clock::now().time_since_epoch().count(); } else { seed = rseed; } CudaContext *c = dev_states.getContext(); int m = dev_states.getSize(); int nthreads = c->getNThreads(); int nblocks = c->getNBlocks(m, nthreads); if (same_seed) { hipLaunchKernelGGL(( kernelCurandSetupSameSeed), dim3(nblocks), dim3(nthreads), 0, c->getStream(), seed, dev_states.getData(), m); } else { hipLaunchKernelGGL(( kernelCurandSetup), dim3(nblocks), dim3(nthreads), 0, c->getStream(), seed, dev_states.getData(), m); } c->synchronize(); } void curandSetup( CudaContext *c, std::unique_ptr<CudaArray<hiprandState_t>> &dev_states, int n, unsigned long long rseed, bool same_seed) { int m = (n + 31) / 32 * 32; c->synchronizeDevice(); dev_states = std::unique_ptr<CudaArray<hiprandState_t>>(new CudaArray<hiprandState_t>(c, m)); curandSetup(*dev_states, rseed, same_seed); } CublasEnvironment::~CublasEnvironment() { DEBUG_OUT("Destroy BLAS env."); // DEBUG_OUT("handle : " <<this->handle_); // destroy device // destroy host if (handle_ != nullptr) { hipblasDestroy(handle_); DEBUG_OUT("CUBLAS destroyed"); } #ifdef RPU_WITH_CUBLAS_DEVICE if (device_handle_created_) { DEBUG_OUT("destroy device handle"); hipLaunchKernelGGL(( kernelCublasDestroy), dim3(1), dim3(1), 0, 0, device_handle_); hipDeviceSynchronize(); hipFree(device_handle_); DEBUG_OUT("CUBLAS device destroyed"); } #endif // hipDeviceReset(); } CublasEnvironment::CublasEnvironment(int gpu_id) { DEBUG_OUT("GET BLAS env."); if (gpu_id >= 0) CUDA_CALL(hipSetDevice(gpu_id)); // create host hipblasStatus_t stat = hipblasCreate(&handle_); CUDA_CALL(hipDeviceSynchronize()); // DEBUG_CALL(this->test();); // DEBUG_OUT("handle : " <<handle_); if (stat != HIPBLAS_STATUS_SUCCESS) { RPU_FATAL("CUBLAS initialization failed"); } else DEBUG_OUT("CUBLAS Host initialized."); #ifdef RPU_WITH_CUBLAS_DEVICE device_handle_created_ = false; #endif } void 
CublasEnvironment::test() { this->runTest(); #ifdef RPU_WITH_CUBLAS_DEVICE if (device_handle_created_) { this->runTestDevice(); } #endif } static __inline__ void modifyS(hipblasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta) { hipblasSscal(handle, n - p + 1, &alpha, &m[IDX2F(p, q, ldm)], ldm); hipblasSscal(handle, ldm - p + 1, &beta, &m[IDX2F(p, q, ldm)], 1); } int CublasEnvironment::runTest() { // make a test run hipblasStatus_t stat; int i, j; int M = 5; int N = 6; float *devPtrA; float *a = 0; a = (float *)malloc(M * N * sizeof(*a)); if (!a) { std::cout << "CUBLAS test run failed (malloc)\n"; return 1; } for (j = 1; j <= N; j++) { for (i = 1; i <= M; i++) { a[IDX2F(i, j, M)] = (float)((i - 1) * M + j); } } if (hipMalloc((void **)&devPtrA, M * N * sizeof(*a)) != hipSuccess) { std::cerr << "CUBLAS test run failed (hipMalloc)\n"; free(a); return 1; } modifyS(handle_, devPtrA, M, N, 2, 3, 16.0f, 12.0f); stat = hipblasSetMatrix(M, N, sizeof(*a), a, M, devPtrA, M); if (stat != HIPBLAS_STATUS_SUCCESS) { std::cerr << "CUBLAS test run failed (data download)\n"; hipFree(devPtrA); free(a); return 1; } stat = hipblasGetMatrix(M, N, sizeof(*a), devPtrA, M, a, M); if (stat != HIPBLAS_STATUS_SUCCESS) { std::cerr << "CUBLAS test run failed (data upload)\n"; hipFree(devPtrA); free(a); return 1; } hipFree(devPtrA); for (j = 1; j <= N; j++) { for (i = 1; i <= M; i++) { std::cout << a[IDX2F(i, j, M)] << ","; } std::cout << std::endl; } free(a); std::cout << "CUBLAS test run successful.\n"; return 0; } #ifdef RPU_WITH_CUBLAS_DEVICE __global__ void kernelCublasDestroy(hipblasHandle_t *device_handle) { hipblasStatus_t status = hipblasDestroy(*device_handle); hipDeviceSynchronize(); if (status != HIPBLAS_STATUS_SUCCESS) { printf("ERROR in destroying cublas device!\n"); } } __global__ void kernelCublasCreateDevice(hipblasHandle_t *device_handle) { hipblasStatus_t status = hipblasCreate(device_handle); hipDeviceSynchronize(); if (status != HIPBLAS_STATUS_SUCCESS) { printf("ERROR in creating cublas device!\n"); return; } } void CublasEnvironment::createDeviceHandle() { if (device_handle_created_) return; CUDA_CALL(hipMalloc(&device_handle_, sizeof(hipblasHandle_t))); CUDA_CALL(hipDeviceSynchronize()); hipLaunchKernelGGL(( kernelCublasCreateDevice), dim3(1), dim3(1), 0, 0, device_handle_); CUDA_CALL(hipDeviceSynchronize()); DEBUG_OUT("Created device handle"); device_handle_created_ = true; } hipblasHandle_t *CublasEnvironment::getDeviceHandle() { if (!device_handle_created_) { this->createDeviceHandle(); } return device_handle_; } __global__ void kernelCublasTest(hipblasHandle_t *device_handle, float *source, float *dest) { hipblasStatus_t status = hipblasScopy(*device_handle, 1, source, 1, dest, 1); hipDeviceSynchronize(); if ((status != HIPBLAS_STATUS_SUCCESS)) { printf("Some problems with the CuBLAS device test.\n"); } } int CublasEnvironment::runTestDevice() { float one = 1; float zero = 0; float *a; float *b; CUDA_CALL(hipMalloc(&a, sizeof(float))); CUDA_CALL(hipMalloc(&b, sizeof(float))); CUDA_CALL(hipMemcpy(a, &one, sizeof(float), hipMemcpyHostToDevice)); CUDA_CALL(hipMemcpy(b, &zero, sizeof(float), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kernelCublasTest), dim3(1), dim3(1), 0, 0, device_handle_, a, b); CUDA_CALL(hipDeviceSynchronize()); CUDA_CALL(hipMemcpy(&zero, b, sizeof(float), hipMemcpyDeviceToHost)); CUDA_CALL(hipFree(a)); CUDA_CALL(hipFree(b)); if (zero == 1) { std::cout << "CuBLAS device test succeded\n"; return 0; } else { std::cerr << "ERROR in CuBLAS device 
test\n"; return 1; } } #endif //**********************************************************************// void CudaContext::init() { DEBUG_OUT("Init context..."); if (gpu_id_ >= 0) { CUDA_CALL(hipSetDevice(gpu_id_)); } else { CUDA_CALL(hipGetDevice(&gpu_id_)); } env_ = new CublasEnvironment(gpu_id_); stream_id_ = 0; rng_created_ = false; shared_ = false; non_blocking_ = true; CUDA_CALL(hipEventCreate(&event_)); prop_ = new hipDeviceProp_t(); CUDA_CALL(hipGetDeviceProperties(prop_, gpu_id_)); } CudaContext::CudaContext(int gpu_id, bool non_blocking) : gpu_id_(gpu_id), non_blocking_(non_blocking) { DEBUG_OUT("Create context on GPU " << gpu_id); this->init(); this->getStream(0); } CudaContext::CudaContext(hipStream_t shared_stream, int gpu_id) : gpu_id_(gpu_id) { DEBUG_OUT("Create context on GPU " << gpu_id << " with shared stream (on id 0)\n"); this->init(); // ignore the test for shared stream 0. Pytorch seem to like 0 // if (!shared_stream) { // RPU_FATAL("Shared stream should not be NULL!"); //} else { shared_ = true; streams_.push_back(shared_stream); // } } CudaContext::~CudaContext() { DEBUG_OUT("Destroy Cuda Context..."); enforceDeviceId(); if (env_ != nullptr) { int i_start = shared_ ? 1 : 0; for (int i = i_start; i < streams_.size(); i++) { hipStreamSynchronize(streams_[i]); hipStreamDestroy(streams_[i]); } } if (event_ != nullptr) { hipEventDestroy(event_); event_ = nullptr; } if (env_ != nullptr) { delete env_; env_ = nullptr; } if (rng_created_) { hiprandDestroyGenerator(rng_); } if (prop_ != nullptr) { delete prop_; prop_ = nullptr; } DEBUG_OUT("Destroyed."); } // copy constructor CudaContext::CudaContext(const CudaContext &other) { // only stream idx 0 is ever shared ! // copy construction will share the stream. // random generator etc are NOT shared ! gpu_id_ = other.gpu_id_; this->init(); shared_ = true; non_blocking_ = other.non_blocking_; // only stream 0 is ever shared !! if (other.streams_.size() > 0) { streams_.push_back(other.streams_[0]); } for (int i = 1; i < other.streams_.size(); i++) { // rest are new streams!! this->getStream(i); } stream_id_ = other.stream_id_; if (other.rng_created_) { this->createRandomGenerator(); } // random states won't be copied. They will be created a new DEBUG_OUT("CudaContext copy constructed [but only first stream shared. 
New streams and event!]."); } // copy assignment CudaContext &CudaContext::operator=(const CudaContext &other) { DEBUG_OUT("Copy assignment "); CudaContext tmp(other); swap(*this, tmp); return *this; } // move constructor CudaContext::CudaContext(CudaContext &&other) { *this = std::move(other); DEBUG_OUT("Move constructor "); } // move assignment CudaContext &CudaContext::operator=(CudaContext &&other) { gpu_id_ = other.gpu_id_; stream_id_ = other.stream_id_; shared_ = other.shared_; non_blocking_ = other.non_blocking_; prop_ = other.prop_; other.prop_ = nullptr; streams_ = std::move(other.streams_); env_ = other.env_; other.env_ = nullptr; rng_ = other.rng_; other.rng_ = nullptr; rng_created_ = other.rng_created_; event_ = other.event_; other.event_ = nullptr; shared_random_states_ = std::move(other.shared_random_states_); DEBUG_OUT("Move assignment "); return *this; } void CudaContext::synchronizeContext() const { enforceDeviceId(); for (int i = 0; i < streams_.size(); i++) { CUDA_CALL(hipStreamSynchronize(streams_[i])); } } void CudaContext::enforceDeviceId() const { #ifdef RPU_EXPLICIT_ENFORCE_DEVICE_ID int gpu_id; CUDA_CALL(hipGetDevice(&gpu_id)); if (gpu_id != gpu_id_) { std::cout << "WARNING wrong device detected!" << std::endl; CUDA_CALL(hipSetDevice(gpu_id_)); } #endif } void CudaContext::synchronizeDevice() const { enforceDeviceId(); CUDA_CALL(hipDeviceSynchronize()); } void CudaContext::synchronizeWith(CudaContext *c) const { if (this->getStream() == c->getStream()) { // do nothing since work on the same stream } else { this->synchronize(); c->synchronize(); } } void CudaContext::synchronizeWith(CudaContext *ca, CudaContext *cb) const { if (ca->getStream() != cb->getStream()) { ca->synchronizeWith(cb); } if (ca->getStream() != this->getStream()) { this->synchronize(); } } void CudaContext::synchronizeStream(int idx) const { DEBUG_OUT("Synchronize stream idx " << idx); enforceDeviceId(); if ((idx >= 0) && (idx < streams_.size())) { CUDA_CALL(hipStreamSynchronize(streams_[idx])); } } void CudaContext::synchronizeStream() const { DEBUG_OUT("Synchronize stream id " << stream_id_); enforceDeviceId(); CUDA_CALL(hipStreamSynchronize(streams_[stream_id_])); } int CudaContext::getNBlocks(int size, int nthreads) const { DEBUG_OUT("get NBlocks for size " << size); return (size + nthreads - 1) / nthreads; } int CudaContext::getNStrideBlocks(int size, int nthreads) const { DEBUG_OUT("get N Stride Blocks for size " << size); int max_blocks = getSMCount() * maxThreadsPerBlock() / nthreads; return MIN(getNBlocks(size, nthreads), max_blocks); } hipStream_t CudaContext::getStream(int idx) { enforceDeviceId(); DEBUG_OUT("Try to get streams " << idx); if ((idx >= 0) && (idx < streams_.size())) { if (stream_id_ != idx) { stream_id_ = idx; CUBLAS_CALL(hipblasSetStream(this->getBlasHandle(), streams_[idx])); } return streams_[idx]; } else if (streams_.size() == idx) { hipStream_t s; if (non_blocking_) { CUDA_CALL(hipStreamCreateWithFlags(&s, hipStreamNonBlocking)); } else { CUDA_CALL(hipStreamCreate(&s)); } streams_.push_back(s); stream_id_ = idx; CUBLAS_CALL(hipblasSetStream(this->getBlasHandle(), streams_[idx])); DEBUG_OUT("Created stream id " << idx << " at : " << streams_[idx] << " ( s: " << s << ")"); return streams_[idx]; } else { RPU_FATAL("Requested stream size mismatch."); } } void CudaContext::setStream(hipStream_t s) { if (shared_) { enforceDeviceId(); if (s != streams_[stream_id_]) { if (stream_id_ != 0) { this->synchronizeDevice(); } else { this->synchronizeStream(); } } streams_[0] = s; 
stream_id_ = 0; } else { RPU_FATAL("setStream: must be shared context."); } } void CudaContext::createRandomGenerator() { if (!rng_created_) { enforceDeviceId(); CURAND_CALL(hiprandCreateGenerator(&rng_, HIPRAND_RNG_PSEUDO_DEFAULT)); CURAND_CALL(hiprandSetStream(rng_, this->getStream())); rng_created_ = true; } } void CudaContext::setRandomSeed(unsigned long long rseed) { enforceDeviceId(); if (!rng_created_) { this->createRandomGenerator(); } unsigned long long seed = rseed; if (rseed == 0) { seed = (unsigned long long)std::chrono::high_resolution_clock::now().time_since_epoch().count(); } else { seed = rseed; } CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(rng_, seed)); this->synchronizeStream(); } void CudaContext::randNormal(float *dev_array, int size, float mean, float stddev) { if (!rng_created_) { setRandomSeed(0); // will create random generator on the fly } if (stddev > 0) { CURAND_CALL(hiprandGenerateNormal(rng_, dev_array, size, mean, stddev)); } else { RPU::math::elemconst(this, dev_array, size, mean); } } void CudaContext::randUniform(float *dev_array, int size) { if (!rng_created_) { setRandomSeed(0); } CURAND_CALL(hiprandGenerateUniform(rng_, dev_array, size)); } hiprandState_t *CudaContext::getRandomStates(int size) { int n = size; if (n <= 0) { n = getSMCount() * maxThreadsPerBlock(); } if (shared_random_states_.size() <= stream_id_) { shared_random_states_.resize(stream_id_ + 1); } if (!shared_random_states_[stream_id_] || (n > shared_random_states_[stream_id_]->getSize())) { curandSetup(this, shared_random_states_[stream_id_], n, 0, false); } return shared_random_states_[stream_id_]->getData(); } void CudaContext::recordWaitEvent(CudaContext *wait_on_context) { this->recordWaitEvent(wait_on_context->getStream(), wait_on_context->getEvent()); } void CudaContext::recordEvent() { CUDA_CALL(hipEventRecord(event_, streams_[stream_id_])); } void CudaContext::waitEvent(hipEvent_t wait_on_event) { CUDA_CALL(hipStreamWaitEvent(streams_[stream_id_], wait_on_event, 0)); } void CudaContext::waitEvent(CudaContext *wait_on_context) { waitEvent(wait_on_context->getEvent()); } void CudaContext::recordWaitEvent(hipStream_t s) { this->recordWaitEvent(s, event_); } void CudaContext::recordWaitEvent(hipStream_t s, hipEvent_t e) { if (streams_[stream_id_] != s) { CUDA_CALL(hipEventRecord(e, s)); CUDA_CALL(hipStreamWaitEvent(streams_[stream_id_], e, 0)); } } //**********************************************************************// template <typename T> CudaArray<T>::CudaArray(CudaContext *c) : size_(0), width_(0), height_(1), pitch_(0), context_(c) {} template <typename T> CudaArray<T>::CudaArray(CudaContext *c, int n) : CudaArray(c) { size_ = n; width_ = n; height_ = 1; // this needs to be one! No height>1 supported yet if (n > 0) { context_->enforceDeviceId(); CUDA_CALL(hipMallocPitch(&values_, &pitch_, n * sizeof(T), height_)); } } template <typename T> CudaArray<T>::CudaArray(CudaContext *c, int n, const T *host_array) : CudaArray(c, n) { if (n > 0) { this->assign(host_array); context_->synchronize(); // better syncrhonize. Constructing is considered slow anyway } } template <typename T> CudaArray<T>::~CudaArray() { // no sync because no ownership of context !! 
(might be already destructed) if ((size_ > 0) && (values_ != nullptr) && (!shared_if_)) { context_->enforceDeviceId(); hipFree(values_); values_ = nullptr; } } // copy constructor template <typename T> CudaArray<T>::CudaArray(const CudaArray<T> &other) { size_ = other.size_; width_ = other.width_; height_ = other.height_; pitch_ = other.pitch_; context_ = other.context_; values_ = nullptr; if (size_ > 0) { context_->enforceDeviceId(); CUDA_CALL(hipMallocPitch(&values_, &pitch_, size_ * sizeof(T), height_)); this->assign(other); context_->synchronize(); // better synchronize. Constructing is slow anyway } if (other.shared_if_) { this->setShared(other.values_); } DEBUG_OUT("CudaArray copy constructed."); } // copy assignment template <typename T> CudaArray<T> &CudaArray<T>::operator=(const CudaArray<T> &other) { context_->enforceDeviceId(); CudaArray<T> tmp(other); // seems a bit inefficient... swap(*this, tmp); context_->synchronize(); // need sync because of tmp return *this; } // move constructor template <typename T> CudaArray<T>::CudaArray(CudaArray<T> &&other) { context_->enforceDeviceId(); *this = std::move(other); } // move assignment template <typename T> CudaArray<T> &CudaArray<T>::operator=(CudaArray<T> &&other) { size_ = other.size_; other.size_ = 0; width_ = other.width_; other.width_ = 0; height_ = other.height_; other.height_ = 0; pitch_ = other.pitch_; other.pitch_ = 0; context_ = other.context_; other.context_ = nullptr; values_ = other.values_; other.values_ = nullptr; shared_if_ = other.shared_if_; return *this; } template <typename T> void CudaArray<T>::setConst(T set_value) { DEBUG_OUT( "Set (hsize,P,W,H): " << size_ << ", " << pitch_ << ", " << width_ * sizeof(T) << ", " << height_); if (size_ > 0) { context_->enforceDeviceId(); if (set_value != 0) { RPU::math::elemconst(context_, values_, size_, set_value); } else { CUDA_CALL(hipMemset2DAsync( values_, pitch_, 0, this->getWidthBytes(), height_, context_->getStream())); } } } template <> void CudaArray<curandStateXORWOW>::setConst(curandStateXORWOW set_value) { RPU_FATAL("Cannot set curandstates to some values."); } template <> void CudaArray<double *>::setConst(double *set_value) { RPU_FATAL("Cannot set pointer types to some values."); } template <> void CudaArray<float *>::setConst(float *set_value) { RPU_FATAL("Cannot set pointer types to some values."); } template <typename T> void CudaArray<T>::printValues(int nmax) const { T *values = new T[size_]; this->copyTo(values); // will synchronize int n = nmax > 0 ? 
MIN(nmax, size_) : size_; for (int i = 0; i < n; ++i) { std::cout << "[" << i << "]:" << values[i] << ", "; } if (n < size_) { std::cout << "..."; } std::cout << std::endl; delete[] values; } template <> void CudaArray<curandStateXORWOW>::printValues(int nmax) const { RPU_FATAL("Cannot print curandstates."); } template <typename T> void CudaArray<T>::assign(const T *host_array) { int sz = size_ * sizeof(T); DEBUG_OUT( "Assign host (hsize,P,W,H): " << sz << ", " << pitch_ << ", " << width_ * sizeof(T) << ", " << height_); context_->enforceDeviceId(); context_->synchronize(); CUDA_CALL(hipMemcpy2DAsync( values_, pitch_, host_array, sz, sz, 1, hipMemcpyHostToDevice, context_->getStream())); } template <typename T> void CudaArray<T>::assignTranspose(const T *host_array, const int m, const int n) { // col major to row major if (m * n != size_) { RPU_FATAL("Size mismatch"); } T *transposed_array = new T[size_]; for (int i = 0; i < size_; i++) { int i_col = (i % n); int i_row = (i / n); transposed_array[i_col * m + i_row] = host_array[i]; } context_->enforceDeviceId(); int sz = size_ * sizeof(T); DEBUG_OUT( "Assign host (hsize,P,W,H): " << sz << ", " << pitch_ << ", " << width_ * sizeof(T) << ", " << height_); context_->synchronize(); CUDA_CALL(hipMemcpy2D( values_, pitch_, transposed_array, sz, sz, 1, hipMemcpyHostToDevice)); // no async delete[] transposed_array; } template <typename T> void CudaArray<T>::assign(const CudaArray<T> &source) { DEBUG_OUT( "Assign device (P,W,H): " << ", " << pitch_ << ", " << width_ * sizeof(T) << ", " << height_); if (source.getSize() != size_) { RPU_FATAL("Assignment of Cuda Array failed. Size mismatch."); } if ((size_ > 0) && (source.getSize() > 0)) { hipStream_t s = context_->getStream(); context_->synchronizeWith(source.getContext()); CUDA_CALL(hipMemcpy2DAsync( values_, pitch_, source.getDataConst(), source.getPitch(), source.getWidthBytes(), 1, hipMemcpyDeviceToDevice, s)); } } template <typename T> void CudaArray<T>::assignFromDevice(const T *device_array) { DEBUG_OUT( "Assign device (P,W,H): " << ", " << pitch_ << ", " << width_ * sizeof(T) << ", " << height_); if ((size_ > 0)) { int sz = size_ * sizeof(T); hipStream_t s = context_->getStream(); context_->synchronizeDevice(); // better do device-wide. Not clear where the device array lives CUDA_CALL( hipMemcpy2DAsync(values_, pitch_, device_array, sz, sz, 1, hipMemcpyDeviceToDevice, s)); } } template <typename T> void CudaArray<T>::setShared(T *device_array) { // destruct if ((size_ > 0) && (values_ != nullptr) && (!shared_if_)) { context_->enforceDeviceId(); CUDA_CALL(hipFree(values_)); values_ = nullptr; } shared_if_ = true; values_ = device_array; // assign memory shared (memory is governed from outside) // Caution: does not CHECK THE SIZE OF THE GIVEN ARRAY! 
} template <typename T> void CudaArray<T>::copyTo(T *host_array) const { int sz = size_ * sizeof(T); DEBUG_OUT( "Copy to host (hsize,P,W,H): " << sz << ", " << pitch_ << ", " << width_ * sizeof(T) << ", " << height_); if (size_ > 0) { context_->enforceDeviceId(); CUDA_CALL(hipMemcpy2DAsync( host_array, sz, values_, pitch_, this->getWidthBytes(), height_, hipMemcpyDeviceToHost, context_->getStream())); context_->synchronizeStream(); } } template <typename T> T *CudaArray<T>::getDataSafe(CudaContext *c) { context_->synchronizeWith(c); return values_; } #ifdef RPU_USE_DOUBLE template class CudaArray<double>; template class CudaArray<double *>; #endif template class CudaArray<float>; template class CudaArray<float *>; template class CudaArray<int>; template class CudaArray<char>; template class CudaArray<uint32_t>; template class CudaArray<uint64_t>; template class CudaArray<curandStateXORWOW>; // reset void resetCuda(int gpu_id) { if (gpu_id >= 0) { CUDA_CALL(hipSetDevice(gpu_id)); } CUDA_CALL(hipDeviceReset()); CUDA_CALL(hipFree(0)); CUDA_CALL(hipDeviceSynchronize()); } } // namespace RPU
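// A minimal, standalone sketch of the per-thread random-state setup that
// kernelCurandSetup/curandSetup in the file above perform: every thread gets the
// same seed but a distinct sequence number, and a second kernel then draws from
// its own state. The file above is the hipified (hiprand) version; this sketch
// uses the plain CUDA/cuRAND names for consistency with the .cu files in this
// collection, and the array size and seed are arbitrary example values.
#include <cstdio>
#include <cuda_runtime.h>
#include <curand_kernel.h>

__global__ void setupStates(unsigned long long seed, curandState_t *states, int n) {
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id < n) curand_init(seed, id, 0, &states[id]);   // same seed, different sequence per thread
}

__global__ void drawUniform(curandState_t *states, float *out, int n) {
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id < n) {
    curandState_t local = states[id];   // work on a local copy of the state
    out[id] = curand_uniform(&local);   // one draw in (0, 1]
    states[id] = local;                 // write the state back for later reuse
  }
}

int main() {
  const int n = 1024, threads = 256, blocks = (n + threads - 1) / threads;
  curandState_t *states;
  float *out;
  cudaMalloc(&states, n * sizeof(curandState_t));
  cudaMallocManaged(&out, n * sizeof(float));
  setupStates<<<blocks, threads>>>(1234ULL, states, n);
  drawUniform<<<blocks, threads>>>(states, out, n);
  cudaDeviceSynchronize();
  printf("out[0]=%f out[%d]=%f\n", out[0], n - 1, out[n - 1]);
  cudaFree(states); cudaFree(out);
  return 0;
}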
033001c92025c06a136f8c26af27a6b9584ec3bc.cu
/** * (C) Copyright 2020, 2021 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "cuda_math_util.h" #include "cuda_util.h" #include "utility_functions.h" #include <chrono> #include <iostream> #include <memory> #define IDX2F(i, j, ld) ((((j)-1) * (ld)) + ((i)-1)) // this should be not necesary, because device id is set individually // per thread. However, if one would want to use 2 GPUs within one // thread, one needs it. #define RPU_EXPLICIT_ENFORCE_DEVICE_ID namespace RPU { __global__ void kernelCurandSetup(unsigned long long rseed, curandState_t *state, int n) { int id = threadIdx.x + blockIdx.x * blockDim.x; /* Each thread gets same seed, a different sequence number, no offset */ if (id < n) { curand_init(rseed, id, 0, &state[id]); } } __global__ void kernelCurandSetupSameSeed(unsigned long long rseed, curandState_t *state, int n) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < n) { curand_init(rseed, 0, 0, &state[id]); } } void curandSetup(CudaArray<curandState_t> &dev_states, unsigned long long rseed, bool same_seed) { unsigned long long seed = rseed; if (rseed == 0) { seed = (unsigned long long)std::chrono::high_resolution_clock::now().time_since_epoch().count(); } else { seed = rseed; } CudaContext *c = dev_states.getContext(); int m = dev_states.getSize(); int nthreads = c->getNThreads(); int nblocks = c->getNBlocks(m, nthreads); if (same_seed) { kernelCurandSetupSameSeed<<<nblocks, nthreads, 0, c->getStream()>>>( seed, dev_states.getData(), m); } else { kernelCurandSetup<<<nblocks, nthreads, 0, c->getStream()>>>(seed, dev_states.getData(), m); } c->synchronize(); } void curandSetup( CudaContext *c, std::unique_ptr<CudaArray<curandState_t>> &dev_states, int n, unsigned long long rseed, bool same_seed) { int m = (n + 31) / 32 * 32; c->synchronizeDevice(); dev_states = std::unique_ptr<CudaArray<curandState_t>>(new CudaArray<curandState_t>(c, m)); curandSetup(*dev_states, rseed, same_seed); } CublasEnvironment::~CublasEnvironment() { DEBUG_OUT("Destroy BLAS env."); // DEBUG_OUT("handle : " <<this->handle_); // destroy device // destroy host if (handle_ != nullptr) { cublasDestroy(handle_); DEBUG_OUT("CUBLAS destroyed"); } #ifdef RPU_WITH_CUBLAS_DEVICE if (device_handle_created_) { DEBUG_OUT("destroy device handle"); kernelCublasDestroy<<<1, 1>>>(device_handle_); cudaDeviceSynchronize(); cudaFree(device_handle_); DEBUG_OUT("CUBLAS device destroyed"); } #endif // cudaDeviceReset(); } CublasEnvironment::CublasEnvironment(int gpu_id) { DEBUG_OUT("GET BLAS env."); if (gpu_id >= 0) CUDA_CALL(cudaSetDevice(gpu_id)); // create host cublasStatus_t stat = cublasCreate(&handle_); CUDA_CALL(cudaDeviceSynchronize()); // DEBUG_CALL(this->test();); // DEBUG_OUT("handle : " <<handle_); if (stat != CUBLAS_STATUS_SUCCESS) { RPU_FATAL("CUBLAS initialization failed"); } else DEBUG_OUT("CUBLAS Host initialized."); #ifdef RPU_WITH_CUBLAS_DEVICE device_handle_created_ = false; #endif } void CublasEnvironment::test() { this->runTest(); #ifdef RPU_WITH_CUBLAS_DEVICE if (device_handle_created_) { this->runTestDevice(); } #endif } static __inline__ void modifyS(cublasHandle_t handle, float *m, int ldm, 
int n, int p, int q, float alpha, float beta) { cublasSscal(handle, n - p + 1, &alpha, &m[IDX2F(p, q, ldm)], ldm); cublasSscal(handle, ldm - p + 1, &beta, &m[IDX2F(p, q, ldm)], 1); } int CublasEnvironment::runTest() { // make a test run cublasStatus_t stat; int i, j; int M = 5; int N = 6; float *devPtrA; float *a = 0; a = (float *)malloc(M * N * sizeof(*a)); if (!a) { std::cout << "CUBLAS test run failed (malloc)\n"; return 1; } for (j = 1; j <= N; j++) { for (i = 1; i <= M; i++) { a[IDX2F(i, j, M)] = (float)((i - 1) * M + j); } } if (cudaMalloc((void **)&devPtrA, M * N * sizeof(*a)) != cudaSuccess) { std::cerr << "CUBLAS test run failed (cudaMalloc)\n"; free(a); return 1; } modifyS(handle_, devPtrA, M, N, 2, 3, 16.0f, 12.0f); stat = cublasSetMatrix(M, N, sizeof(*a), a, M, devPtrA, M); if (stat != CUBLAS_STATUS_SUCCESS) { std::cerr << "CUBLAS test run failed (data download)\n"; cudaFree(devPtrA); free(a); return 1; } stat = cublasGetMatrix(M, N, sizeof(*a), devPtrA, M, a, M); if (stat != CUBLAS_STATUS_SUCCESS) { std::cerr << "CUBLAS test run failed (data upload)\n"; cudaFree(devPtrA); free(a); return 1; } cudaFree(devPtrA); for (j = 1; j <= N; j++) { for (i = 1; i <= M; i++) { std::cout << a[IDX2F(i, j, M)] << ","; } std::cout << std::endl; } free(a); std::cout << "CUBLAS test run successful.\n"; return 0; } #ifdef RPU_WITH_CUBLAS_DEVICE __global__ void kernelCublasDestroy(cublasHandle_t *device_handle) { cublasStatus_t status = cublasDestroy(*device_handle); cudaDeviceSynchronize(); if (status != CUBLAS_STATUS_SUCCESS) { printf("ERROR in destroying cublas device!\n"); } } __global__ void kernelCublasCreateDevice(cublasHandle_t *device_handle) { cublasStatus_t status = cublasCreate(device_handle); cudaDeviceSynchronize(); if (status != CUBLAS_STATUS_SUCCESS) { printf("ERROR in creating cublas device!\n"); return; } } void CublasEnvironment::createDeviceHandle() { if (device_handle_created_) return; CUDA_CALL(cudaMalloc(&device_handle_, sizeof(cublasHandle_t))); CUDA_CALL(cudaDeviceSynchronize()); kernelCublasCreateDevice<<<1, 1>>>(device_handle_); CUDA_CALL(cudaDeviceSynchronize()); DEBUG_OUT("Created device handle"); device_handle_created_ = true; } cublasHandle_t *CublasEnvironment::getDeviceHandle() { if (!device_handle_created_) { this->createDeviceHandle(); } return device_handle_; } __global__ void kernelCublasTest(cublasHandle_t *device_handle, float *source, float *dest) { cublasStatus_t status = cublasScopy(*device_handle, 1, source, 1, dest, 1); cudaDeviceSynchronize(); if ((status != CUBLAS_STATUS_SUCCESS)) { printf("Some problems with the CuBLAS device test.\n"); } } int CublasEnvironment::runTestDevice() { float one = 1; float zero = 0; float *a; float *b; CUDA_CALL(cudaMalloc(&a, sizeof(float))); CUDA_CALL(cudaMalloc(&b, sizeof(float))); CUDA_CALL(cudaMemcpy(a, &one, sizeof(float), cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemcpy(b, &zero, sizeof(float), cudaMemcpyHostToDevice)); kernelCublasTest<<<1, 1>>>(device_handle_, a, b); CUDA_CALL(cudaDeviceSynchronize()); CUDA_CALL(cudaMemcpy(&zero, b, sizeof(float), cudaMemcpyDeviceToHost)); CUDA_CALL(cudaFree(a)); CUDA_CALL(cudaFree(b)); if (zero == 1) { std::cout << "CuBLAS device test succeded\n"; return 0; } else { std::cerr << "ERROR in CuBLAS device test\n"; return 1; } } #endif //**********************************************************************// void CudaContext::init() { DEBUG_OUT("Init context..."); if (gpu_id_ >= 0) { CUDA_CALL(cudaSetDevice(gpu_id_)); } else { CUDA_CALL(cudaGetDevice(&gpu_id_)); } env_ = new 
CublasEnvironment(gpu_id_); stream_id_ = 0; rng_created_ = false; shared_ = false; non_blocking_ = true; CUDA_CALL(cudaEventCreate(&event_)); prop_ = new cudaDeviceProp(); CUDA_CALL(cudaGetDeviceProperties(prop_, gpu_id_)); } CudaContext::CudaContext(int gpu_id, bool non_blocking) : gpu_id_(gpu_id), non_blocking_(non_blocking) { DEBUG_OUT("Create context on GPU " << gpu_id); this->init(); this->getStream(0); } CudaContext::CudaContext(cudaStream_t shared_stream, int gpu_id) : gpu_id_(gpu_id) { DEBUG_OUT("Create context on GPU " << gpu_id << " with shared stream (on id 0)\n"); this->init(); // ignore the test for shared stream 0. Pytorch seem to like 0 // if (!shared_stream) { // RPU_FATAL("Shared stream should not be NULL!"); //} else { shared_ = true; streams_.push_back(shared_stream); // } } CudaContext::~CudaContext() { DEBUG_OUT("Destroy Cuda Context..."); enforceDeviceId(); if (env_ != nullptr) { int i_start = shared_ ? 1 : 0; for (int i = i_start; i < streams_.size(); i++) { cudaStreamSynchronize(streams_[i]); cudaStreamDestroy(streams_[i]); } } if (event_ != nullptr) { cudaEventDestroy(event_); event_ = nullptr; } if (env_ != nullptr) { delete env_; env_ = nullptr; } if (rng_created_) { curandDestroyGenerator(rng_); } if (prop_ != nullptr) { delete prop_; prop_ = nullptr; } DEBUG_OUT("Destroyed."); } // copy constructor CudaContext::CudaContext(const CudaContext &other) { // only stream idx 0 is ever shared ! // copy construction will share the stream. // random generator etc are NOT shared ! gpu_id_ = other.gpu_id_; this->init(); shared_ = true; non_blocking_ = other.non_blocking_; // only stream 0 is ever shared !! if (other.streams_.size() > 0) { streams_.push_back(other.streams_[0]); } for (int i = 1; i < other.streams_.size(); i++) { // rest are new streams!! this->getStream(i); } stream_id_ = other.stream_id_; if (other.rng_created_) { this->createRandomGenerator(); } // random states won't be copied. They will be created a new DEBUG_OUT("CudaContext copy constructed [but only first stream shared. New streams and event!]."); } // copy assignment CudaContext &CudaContext::operator=(const CudaContext &other) { DEBUG_OUT("Copy assignment "); CudaContext tmp(other); swap(*this, tmp); return *this; } // move constructor CudaContext::CudaContext(CudaContext &&other) { *this = std::move(other); DEBUG_OUT("Move constructor "); } // move assignment CudaContext &CudaContext::operator=(CudaContext &&other) { gpu_id_ = other.gpu_id_; stream_id_ = other.stream_id_; shared_ = other.shared_; non_blocking_ = other.non_blocking_; prop_ = other.prop_; other.prop_ = nullptr; streams_ = std::move(other.streams_); env_ = other.env_; other.env_ = nullptr; rng_ = other.rng_; other.rng_ = nullptr; rng_created_ = other.rng_created_; event_ = other.event_; other.event_ = nullptr; shared_random_states_ = std::move(other.shared_random_states_); DEBUG_OUT("Move assignment "); return *this; } void CudaContext::synchronizeContext() const { enforceDeviceId(); for (int i = 0; i < streams_.size(); i++) { CUDA_CALL(cudaStreamSynchronize(streams_[i])); } } void CudaContext::enforceDeviceId() const { #ifdef RPU_EXPLICIT_ENFORCE_DEVICE_ID int gpu_id; CUDA_CALL(cudaGetDevice(&gpu_id)); if (gpu_id != gpu_id_) { std::cout << "WARNING wrong device detected!" 
<< std::endl; CUDA_CALL(cudaSetDevice(gpu_id_)); } #endif } void CudaContext::synchronizeDevice() const { enforceDeviceId(); CUDA_CALL(cudaDeviceSynchronize()); } void CudaContext::synchronizeWith(CudaContext *c) const { if (this->getStream() == c->getStream()) { // do nothing since work on the same stream } else { this->synchronize(); c->synchronize(); } } void CudaContext::synchronizeWith(CudaContext *ca, CudaContext *cb) const { if (ca->getStream() != cb->getStream()) { ca->synchronizeWith(cb); } if (ca->getStream() != this->getStream()) { this->synchronize(); } } void CudaContext::synchronizeStream(int idx) const { DEBUG_OUT("Synchronize stream idx " << idx); enforceDeviceId(); if ((idx >= 0) && (idx < streams_.size())) { CUDA_CALL(cudaStreamSynchronize(streams_[idx])); } } void CudaContext::synchronizeStream() const { DEBUG_OUT("Synchronize stream id " << stream_id_); enforceDeviceId(); CUDA_CALL(cudaStreamSynchronize(streams_[stream_id_])); } int CudaContext::getNBlocks(int size, int nthreads) const { DEBUG_OUT("get NBlocks for size " << size); return (size + nthreads - 1) / nthreads; } int CudaContext::getNStrideBlocks(int size, int nthreads) const { DEBUG_OUT("get N Stride Blocks for size " << size); int max_blocks = getSMCount() * maxThreadsPerBlock() / nthreads; return MIN(getNBlocks(size, nthreads), max_blocks); } cudaStream_t CudaContext::getStream(int idx) { enforceDeviceId(); DEBUG_OUT("Try to get streams " << idx); if ((idx >= 0) && (idx < streams_.size())) { if (stream_id_ != idx) { stream_id_ = idx; CUBLAS_CALL(cublasSetStream(this->getBlasHandle(), streams_[idx])); } return streams_[idx]; } else if (streams_.size() == idx) { cudaStream_t s; if (non_blocking_) { CUDA_CALL(cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking)); } else { CUDA_CALL(cudaStreamCreate(&s)); } streams_.push_back(s); stream_id_ = idx; CUBLAS_CALL(cublasSetStream(this->getBlasHandle(), streams_[idx])); DEBUG_OUT("Created stream id " << idx << " at : " << streams_[idx] << " ( s: " << s << ")"); return streams_[idx]; } else { RPU_FATAL("Requested stream size mismatch."); } } void CudaContext::setStream(cudaStream_t s) { if (shared_) { enforceDeviceId(); if (s != streams_[stream_id_]) { if (stream_id_ != 0) { this->synchronizeDevice(); } else { this->synchronizeStream(); } } streams_[0] = s; stream_id_ = 0; } else { RPU_FATAL("setStream: must be shared context."); } } void CudaContext::createRandomGenerator() { if (!rng_created_) { enforceDeviceId(); CURAND_CALL(curandCreateGenerator(&rng_, CURAND_RNG_PSEUDO_DEFAULT)); CURAND_CALL(curandSetStream(rng_, this->getStream())); rng_created_ = true; } } void CudaContext::setRandomSeed(unsigned long long rseed) { enforceDeviceId(); if (!rng_created_) { this->createRandomGenerator(); } unsigned long long seed = rseed; if (rseed == 0) { seed = (unsigned long long)std::chrono::high_resolution_clock::now().time_since_epoch().count(); } else { seed = rseed; } CURAND_CALL(curandSetPseudoRandomGeneratorSeed(rng_, seed)); this->synchronizeStream(); } void CudaContext::randNormal(float *dev_array, int size, float mean, float stddev) { if (!rng_created_) { setRandomSeed(0); // will create random generator on the fly } if (stddev > 0) { CURAND_CALL(curandGenerateNormal(rng_, dev_array, size, mean, stddev)); } else { RPU::math::elemconst(this, dev_array, size, mean); } } void CudaContext::randUniform(float *dev_array, int size) { if (!rng_created_) { setRandomSeed(0); } CURAND_CALL(curandGenerateUniform(rng_, dev_array, size)); } curandState_t 
*CudaContext::getRandomStates(int size) {
  int n = size;
  if (n <= 0) {
    n = getSMCount() * maxThreadsPerBlock();
  }
  if (shared_random_states_.size() <= stream_id_) {
    shared_random_states_.resize(stream_id_ + 1);
  }
  if (!shared_random_states_[stream_id_] || (n > shared_random_states_[stream_id_]->getSize())) {
    curandSetup(this, shared_random_states_[stream_id_], n, 0, false);
  }
  return shared_random_states_[stream_id_]->getData();
}

void CudaContext::recordWaitEvent(CudaContext *wait_on_context) {
  this->recordWaitEvent(wait_on_context->getStream(), wait_on_context->getEvent());
}

void CudaContext::recordEvent() { CUDA_CALL(cudaEventRecord(event_, streams_[stream_id_])); }

void CudaContext::waitEvent(cudaEvent_t wait_on_event) {
  CUDA_CALL(cudaStreamWaitEvent(streams_[stream_id_], wait_on_event, 0));
}

void CudaContext::waitEvent(CudaContext *wait_on_context) { waitEvent(wait_on_context->getEvent()); }

void CudaContext::recordWaitEvent(cudaStream_t s) { this->recordWaitEvent(s, event_); }

void CudaContext::recordWaitEvent(cudaStream_t s, cudaEvent_t e) {
  if (streams_[stream_id_] != s) {
    CUDA_CALL(cudaEventRecord(e, s));
    CUDA_CALL(cudaStreamWaitEvent(streams_[stream_id_], e, 0));
  }
}

//**********************************************************************//

template <typename T>
CudaArray<T>::CudaArray(CudaContext *c) : size_(0), width_(0), height_(1), pitch_(0), context_(c) {}

template <typename T> CudaArray<T>::CudaArray(CudaContext *c, int n) : CudaArray(c) {
  size_ = n;
  width_ = n;
  height_ = 1; // this needs to be one! No height>1 supported yet
  if (n > 0) {
    context_->enforceDeviceId();
    CUDA_CALL(cudaMallocPitch(&values_, &pitch_, n * sizeof(T), height_));
  }
}

template <typename T>
CudaArray<T>::CudaArray(CudaContext *c, int n, const T *host_array) : CudaArray(c, n) {
  if (n > 0) {
    this->assign(host_array);
    context_->synchronize(); // better synchronize. Constructing is considered slow anyway
  }
}

template <typename T> CudaArray<T>::~CudaArray() {
  // no sync because no ownership of context !! (might be already destructed)
  if ((size_ > 0) && (values_ != nullptr) && (!shared_if_)) {
    context_->enforceDeviceId();
    cudaFree(values_);
    values_ = nullptr;
  }
}

// copy constructor
template <typename T> CudaArray<T>::CudaArray(const CudaArray<T> &other) {
  size_ = other.size_;
  width_ = other.width_;
  height_ = other.height_;
  pitch_ = other.pitch_;
  context_ = other.context_;
  values_ = nullptr;

  if (size_ > 0) {
    context_->enforceDeviceId();
    CUDA_CALL(cudaMallocPitch(&values_, &pitch_, size_ * sizeof(T), height_));
    this->assign(other);
    context_->synchronize(); // better synchronize. Constructing is slow anyway
  }

  if (other.shared_if_) {
    this->setShared(other.values_);
  }

  DEBUG_OUT("CudaArray copy constructed.");
}

// copy assignment
template <typename T> CudaArray<T> &CudaArray<T>::operator=(const CudaArray<T> &other) {
  context_->enforceDeviceId();
  CudaArray<T> tmp(other); // seems a bit inefficient...
  swap(*this, tmp);
  context_->synchronize(); // need sync because of tmp
  return *this;
}

// move constructor
template <typename T> CudaArray<T>::CudaArray(CudaArray<T> &&other) {
  context_->enforceDeviceId();
  *this = std::move(other);
}

// move assignment
template <typename T> CudaArray<T> &CudaArray<T>::operator=(CudaArray<T> &&other) {
  size_ = other.size_;
  other.size_ = 0;

  width_ = other.width_;
  other.width_ = 0;

  height_ = other.height_;
  other.height_ = 0;

  pitch_ = other.pitch_;
  other.pitch_ = 0;

  context_ = other.context_;
  other.context_ = nullptr;

  values_ = other.values_;
  other.values_ = nullptr;

  shared_if_ = other.shared_if_;

  return *this;
}

template <typename T> void CudaArray<T>::setConst(T set_value) {
  DEBUG_OUT(
      "Set (hsize,P,W,H): " << size_ << ", " << pitch_ << ", " << width_ * sizeof(T) << ", "
                            << height_);
  if (size_ > 0) {
    context_->enforceDeviceId();
    if (set_value != 0) {
      RPU::math::elemconst(context_, values_, size_, set_value);
    } else {
      CUDA_CALL(cudaMemset2DAsync(
          values_, pitch_, 0, this->getWidthBytes(), height_, context_->getStream()));
    }
  }
}

template <> void CudaArray<curandStateXORWOW>::setConst(curandStateXORWOW set_value) {
  RPU_FATAL("Cannot set curandstates to some values.");
}

template <> void CudaArray<double *>::setConst(double *set_value) {
  RPU_FATAL("Cannot set pointer types to some values.");
}

template <> void CudaArray<float *>::setConst(float *set_value) {
  RPU_FATAL("Cannot set pointer types to some values.");
}

template <typename T> void CudaArray<T>::printValues(int nmax) const {
  T *values = new T[size_];
  this->copyTo(values); // will synchronize
  int n = nmax > 0 ? MIN(nmax, size_) : size_;
  for (int i = 0; i < n; ++i) {
    std::cout << "[" << i << "]:" << values[i] << ", ";
  }
  if (n < size_) {
    std::cout << "...";
  }
  std::cout << std::endl;
  delete[] values;
}

template <> void CudaArray<curandStateXORWOW>::printValues(int nmax) const {
  RPU_FATAL("Cannot print curandstates.");
}

template <typename T> void CudaArray<T>::assign(const T *host_array) {
  int sz = size_ * sizeof(T);
  DEBUG_OUT(
      "Assign host (hsize,P,W,H): " << sz << ", " << pitch_ << ", " << width_ * sizeof(T) << ", "
                                    << height_);
  context_->enforceDeviceId();
  context_->synchronize();
  CUDA_CALL(cudaMemcpy2DAsync(
      values_, pitch_, host_array, sz, sz, 1, cudaMemcpyHostToDevice, context_->getStream()));
}

template <typename T>
void CudaArray<T>::assignTranspose(const T *host_array, const int m, const int n) {
  // col major to row major
  if (m * n != size_) {
    RPU_FATAL("Size mismatch");
  }
  T *transposed_array = new T[size_];
  for (int i = 0; i < size_; i++) {
    int i_col = (i % n);
    int i_row = (i / n);
    transposed_array[i_col * m + i_row] = host_array[i];
  }
  context_->enforceDeviceId();
  int sz = size_ * sizeof(T);
  DEBUG_OUT(
      "Assign host (hsize,P,W,H): " << sz << ", " << pitch_ << ", " << width_ * sizeof(T) << ", "
                                    << height_);
  context_->synchronize();
  CUDA_CALL(cudaMemcpy2D(
      values_, pitch_, transposed_array, sz, sz, 1, cudaMemcpyHostToDevice)); // no async
  delete[] transposed_array;
}

template <typename T> void CudaArray<T>::assign(const CudaArray<T> &source) {
  DEBUG_OUT(
      "Assign device (P,W,H): "
      << ", " << pitch_ << ", " << width_ * sizeof(T) << ", " << height_);
  if (source.getSize() != size_) {
    RPU_FATAL("Assignment of Cuda Array failed. Size mismatch.");
  }
  if ((size_ > 0) && (source.getSize() > 0)) {
    cudaStream_t s = context_->getStream();
    context_->synchronizeWith(source.getContext());
    CUDA_CALL(cudaMemcpy2DAsync(
        values_, pitch_, source.getDataConst(), source.getPitch(), source.getWidthBytes(), 1,
        cudaMemcpyDeviceToDevice, s));
  }
}

template <typename T> void CudaArray<T>::assignFromDevice(const T *device_array) {
  DEBUG_OUT(
      "Assign device (P,W,H): "
      << ", " << pitch_ << ", " << width_ * sizeof(T) << ", " << height_);
  if ((size_ > 0)) {
    int sz = size_ * sizeof(T);
    cudaStream_t s = context_->getStream();
    context_->synchronizeDevice(); // better do device-wide. Not clear where the device array lives
    CUDA_CALL(
        cudaMemcpy2DAsync(values_, pitch_, device_array, sz, sz, 1, cudaMemcpyDeviceToDevice, s));
  }
}

template <typename T> void CudaArray<T>::setShared(T *device_array) {

  // destruct
  if ((size_ > 0) && (values_ != nullptr) && (!shared_if_)) {
    context_->enforceDeviceId();
    CUDA_CALL(cudaFree(values_));
    values_ = nullptr;
  }
  shared_if_ = true;
  values_ = device_array; // assign memory shared (memory is governed from outside)

  // Caution: does not CHECK THE SIZE OF THE GIVEN ARRAY!
}

template <typename T> void CudaArray<T>::copyTo(T *host_array) const {
  int sz = size_ * sizeof(T);
  DEBUG_OUT(
      "Copy to host (hsize,P,W,H): " << sz << ", " << pitch_ << ", " << width_ * sizeof(T) << ", "
                                     << height_);
  if (size_ > 0) {
    context_->enforceDeviceId();
    CUDA_CALL(cudaMemcpy2DAsync(
        host_array, sz, values_, pitch_, this->getWidthBytes(), height_, cudaMemcpyDeviceToHost,
        context_->getStream()));
    context_->synchronizeStream();
  }
}

template <typename T> T *CudaArray<T>::getDataSafe(CudaContext *c) {
  context_->synchronizeWith(c);
  return values_;
}

#ifdef RPU_USE_DOUBLE
template class CudaArray<double>;
template class CudaArray<double *>;
#endif
template class CudaArray<float>;
template class CudaArray<float *>;

template class CudaArray<int>;
template class CudaArray<char>;
template class CudaArray<uint32_t>;
template class CudaArray<uint64_t>;
template class CudaArray<curandStateXORWOW>;

// reset
void resetCuda(int gpu_id) {
  if (gpu_id >= 0) {
    CUDA_CALL(cudaSetDevice(gpu_id));
  }
  CUDA_CALL(cudaDeviceReset());
  CUDA_CALL(cudaFree(0));
  CUDA_CALL(cudaDeviceSynchronize());
}

} // namespace RPU
e5554e08038f0d5baec28162d0844bb99fe343ea.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Context.h> #include <ATen/hip/HIPContext.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/PinnedMemoryAllocator.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/HIPSolver.h> #include <ATen/hip/HIPBlas.h> #include <ATen/hip/HIPEvent.h> #include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h> #include <ATen/native/LinearAlgebraUtils.h> #include <ATen/native/hip/MiscUtils.h> #include <ATen/native/hip/BatchLinearAlgebraLib.h> namespace at { namespace native { // Some cuBLAS and cuSOLVER batched routines require input to be a device array of pointers to device individual matrices // 'input' must be a contiguous tensor template <typename scalar_t> static Tensor get_device_pointers(const Tensor& input) { auto input_data = input.data_ptr<scalar_t>(); int64_t input_mat_stride = matrixStride(input); // cublas/cusolver interface requires 'int' int batch_size = cuda_int_cast(batchCount(input), "batch_size"); // if batch_size==0, then start=0 and end=0 // if input_mat_stride==0, then step=sizeof(scalar_t) return at::arange( /*start=*/reinterpret_cast<int64_t>(input_data), /*end=*/reinterpret_cast<int64_t>(input_data + batch_size * input_mat_stride), /*step=*/static_cast<int64_t>(std::max<int64_t>(input_mat_stride, 1) * sizeof(scalar_t)), input.options().dtype(at::kLong)); } template <typename scalar_t> void apply_geqrf_batched(const Tensor& input, const Tensor& tau) { // AMD ROCm backend is implemented via rewriting all CUDA calls to HIP // rocBLAS does not implement BLAS-like extensions of cuBLAS, they're in rocSOLVER // rocSOLVER is currently not used in ATen, therefore we raise an error in this case #ifndef CUDART_VERSION TORCH_CHECK(false, "geqrf: Batched version is supported only with cuBLAS backend.") #else auto batch_size = cuda_int_cast(batchCount(input), "batch_size"); auto m = cuda_int_cast(input.size(-2), "m"); auto n = cuda_int_cast(input.size(-1), "n"); auto lda = std::max<int>(1, m); // cuBLAS batched geqrf requires input to be the device array of pointers to device single matrices Tensor input_ptr_array = get_device_pointers<scalar_t>(input); Tensor tau_ptr_array = get_device_pointers<scalar_t>(tau.unsqueeze(-1)); auto input_ptr_array_data = reinterpret_cast<scalar_t**>(input_ptr_array.data_ptr()); auto tau_ptr_array_data = reinterpret_cast<scalar_t**>(tau_ptr_array.data_ptr()); int info; auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::geqrfBatched(handle, m, n, input_ptr_array_data, lda, tau_ptr_array_data, &info, batch_size); // info only indicates wrong arguments to geqrfBatched call // info is a host variable, we can check it without device synchronization TORCH_INTERNAL_ASSERT(info == 0); #endif } void geqrf_batched_cublas(const Tensor& input, const Tensor& tau) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_batched_cuda", [&]{ apply_geqrf_batched<scalar_t>(input, tau); }); } template <typename scalar_t> static void apply_triangular_solve(Tensor& A, Tensor& B, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; hipblasOperation_t trans = transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N; trans = conjugate_transpose ? HIPBLAS_OP_C : trans; hipblasDiagType_t diag = unitriangular ? 
HIPBLAS_DIAG_UNIT : HIPBLAS_DIAG_NON_UNIT; hipblasSideMode_t side = HIPBLAS_SIDE_LEFT; auto A_data = A.data_ptr<scalar_t>(); auto B_data = B.data_ptr<scalar_t>(); auto A_mat_stride = matrixStride(A); auto B_mat_stride = matrixStride(B); auto batch_size = batchCount(A); auto n = cuda_int_cast(A.size(-2), "n"); auto nrhs = cuda_int_cast(B.size(-1), "nrhs"); auto lda = std::max<int>(1, n); auto alpha = scalar_t{1}; for (decltype(batch_size) i = 0; i < batch_size; i++) { scalar_t* A_working_ptr = &A_data[i * A_mat_stride]; scalar_t* B_working_ptr = &B_data[i * B_mat_stride]; auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::trsm(handle, side, uplo, trans, diag, n, nrhs, &alpha, A_working_ptr, lda, B_working_ptr, lda); } } void triangular_solve_cublas(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { (void)infos; // unused AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{ apply_triangular_solve<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular); }); } template <typename scalar_t> static void apply_triangular_solve_batched(Tensor& A, Tensor& B, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; hipblasOperation_t trans = transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N; trans = conjugate_transpose ? HIPBLAS_OP_C : trans; hipblasDiagType_t diag = unitriangular ? HIPBLAS_DIAG_UNIT : HIPBLAS_DIAG_NON_UNIT; hipblasSideMode_t side = HIPBLAS_SIDE_LEFT; auto A_data = A.data_ptr<scalar_t>(); auto B_data = B.data_ptr<scalar_t>(); auto A_mat_stride = matrixStride(A); auto B_mat_stride = matrixStride(B); auto batch_size = cuda_int_cast(batchCount(A), "batch_size"); auto n = cuda_int_cast(A.size(-2), "n"); auto nrhs = cuda_int_cast(B.size(-1), "nrhs"); auto lda = std::max<int>(1, n); auto alpha = scalar_t{1}; // cuBLAS batched trsm requires input to be the device array of pointers to device single matrices Tensor A_ptr_array = get_device_pointers<scalar_t>(A); Tensor B_ptr_array = get_device_pointers<scalar_t>(B); auto A_ptr_array_data = reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr()); auto B_ptr_array_data = reinterpret_cast<scalar_t**>(B_ptr_array.data_ptr()); auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::trsmBatched(handle, side, uplo, trans, diag, n, nrhs, &alpha, A_ptr_array_data, lda, B_ptr_array_data, lda, batch_size); } void triangular_solve_batched_cublas(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { (void)infos; // unused AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{ apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular); }); } #ifdef USE_CUSOLVER inline static Tensor column_major_identity_matrix_like(const Tensor& self) { auto size = self.sizes(); auto size_slice = IntArrayRef(size.data(), size.size()-1); return at::ones(size_slice, self.options()).diag_embed().transpose(-2, -1); } template <typename scalar_t> inline static void _apply_single_inverse_helper(scalar_t* self_ptr, scalar_t* self_inv_ptr, int* ipiv_ptr, int* info_getrf_ptr, int* info_getrs_ptr, int n, int lda) { // self_inv_ptr should already be an identity matrix auto handle = at::cuda::getCurrentCUDASolverDnHandle(); at::cuda::solver::getrf<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, info_getrf_ptr); 
at::cuda::solver::getrs<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, self_inv_ptr, lda, info_getrs_ptr); } template <typename scalar_t> static void apply_batched_inverse_lib(Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) { const int batch_size = cuda_int_cast(batchCount(self), "batchCount"); const int n = cuda_int_cast(self.size(-2), "self.size(-2)"); const int lda = std::max<int>(1, n); auto self_data = self.data_ptr<scalar_t>(); auto self_mat_stride = matrixStride(self); auto self_inv_data = self_inv.data_ptr<scalar_t>(); auto self_inv_mat_stride = matrixStride(self_inv); auto infos_getrf_data = infos_getrf.data_ptr<int>(); auto infos_getrs_data = infos_getrs.data_ptr<int>(); auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get(); // Heuristic: For small batch size or large matrix size, we use for-loop to iterate over the batches instead of // calling the batched cublas routine. if (batch_size <= 8 || /* batch_size > 8 && */ n >= 512) { for (int64_t i = 0; i < batch_size; i++) { auto dataPtr = allocator.allocate(sizeof(int) * lda); int* pivot = reinterpret_cast<int*>(dataPtr.get()); int* infos_getrf_working_ptr = &infos_getrf_data[i]; int* infos_getrs_working_ptr = &infos_getrs_data[i]; _apply_single_inverse_helper<scalar_t>( &self_data[i * self_mat_stride], &self_inv_data[i * self_inv_mat_stride], pivot, infos_getrf_working_ptr, infos_getrs_working_ptr, n, lda); } } else { // cublas batched kernels require input be "device array of device pointers" Tensor self_array = at::arange( reinterpret_cast<int64_t>(self_data), reinterpret_cast<int64_t>(&self_data[(batch_size-1) * self_mat_stride]) + 1, static_cast<int64_t>(self_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong)); Tensor self_inv_array = at::arange( reinterpret_cast<int64_t>(self_inv_data), reinterpret_cast<int64_t>(&self_inv_data[(batch_size-1) * self_inv_mat_stride]) + 1, static_cast<int64_t>(self_inv_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong)); auto dataPtr = allocator.allocate(sizeof(int)*batch_size*lda); int* ipiv_array = reinterpret_cast<int*>(dataPtr.get()); at::cuda::blas::getrfBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda, ipiv_array, infos_getrf_data, batch_size); at::cuda::blas::getriBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda, ipiv_array, reinterpret_cast<scalar_t**>(self_inv_array.data_ptr()), lda, infos_getrs_data, batch_size); } } template <typename scalar_t> static void apply_single_inverse_lib(const Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) { int n = cuda_int_cast(self.size(-2), "self.size(-2)"); int lda = std::max<int>(1, n); Tensor ipiv = at::empty({lda}, self.options().dtype(at::kInt)); _apply_single_inverse_helper<scalar_t>( self.data_ptr<scalar_t>(), self_inv.data_ptr<scalar_t>(), ipiv.data_ptr<int>(), infos_getrf.data_ptr<int>(), infos_getrs.data_ptr<int>(), n, lda); } // This is a type dispatching helper function for 'apply_batched_inverse_lib' and 'apply_single_inverse_lib' Tensor& _linalg_inv_out_helper_cuda_lib(Tensor& result, Tensor& infos_getrf, Tensor& infos_getrs) { // assuming result is in column major order and contains the matrices to invert Tensor input_working_copy = cloneBatchedColumnMajor(result); // for getrf + getrs (cusolver path) // result should be filled with identity matrices result.zero_(); result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1); const int batch_size = 
cuda_int_cast(batchCount(result), "batchCount"); if (result.dim() > 2) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{ apply_batched_inverse_lib<scalar_t>( input_working_copy, result, infos_getrf, infos_getrs); }); } else { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{ apply_single_inverse_lib<scalar_t>(input_working_copy, result, infos_getrf, infos_getrs); }); } return result; } // entrance of calculations of `inverse` using cusolver getrf + getrs, cublas getrfBatched + getriBatched Tensor _inverse_helper_cuda_lib(const Tensor& self) { Tensor self_working_copy = cloneBatchedColumnMajor(self); Tensor self_inv_working_copy = column_major_identity_matrix_like(self_working_copy); const int batch_size = cuda_int_cast(batchCount(self), "batchCount"); if (self.dim() > 2 && batch_size > 1) { Tensor infos_getrf = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt)); Tensor infos_getrs = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_batched_inverse_lib<scalar_t>( self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs); }); batchCheckErrors(infos_getrf, "inverse_cuda"); batchCheckErrors(infos_getrs, "inverse_cuda"); } else { Tensor infos_getrf = at::zeros({1}, self.options().dtype(kInt)); Tensor infos_getrs = at::zeros({1}, self.options().dtype(kInt)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_single_inverse_lib<scalar_t>(self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs); }); batchCheckErrors(infos_getrf, "inverse_cuda"); batchCheckErrors(infos_getrs, "inverse_cuda"); } return self_inv_working_copy; } // call cusolver gesvdj function to calculate svd template<typename scalar_t> inline static void _apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) { using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto U_data = U.data_ptr<scalar_t>(); auto S_data = S.data_ptr<value_t>(); auto VT_data = VT.data_ptr<scalar_t>(); auto self_stride = matrixStride(self); auto U_stride = matrixStride(U); auto S_stride = S.size(-1); auto VT_stride = matrixStride(VT); int batchsize = cuda_int_cast(batchCount(self), "batch size"); int m = cuda_int_cast(self.size(-2), "m"); int n = cuda_int_cast(self.size(-1), "n"); int lda = std::max<int>(1, m); int ldvt = std::max<int>(1, n); for(int i = 0; i < batchsize; i++){ // gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU hipsolverGesvdjInfo_t gesvdj_params; TORCH_CUSOLVER_CHECK(hipsolverDnCreateGesvdjInfo(&gesvdj_params)); // TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7)); // TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15)); auto handle = at::cuda::getCurrentCUDASolverDnHandle(); auto jobz = compute_uv ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR; at::cuda::solver::gesvdj<scalar_t>( handle, jobz, /*econ=*/ some ? 
1 : 0, m, n, self_data + i * self_stride, lda, S_data + i * S_stride, U_data + i * U_stride, lda, VT_data + i * VT_stride, ldvt, infos.data_ptr<int>() + i, gesvdj_params ); TORCH_CUSOLVER_CHECK(hipsolverDnDestroyGesvdjInfo(gesvdj_params)); } } // wrapper around _apply_svd_lib_gesvdj that handles dtype dispatch, // creates a working copy of the input, and creates V^H from the V returned by gesvdj inline static void apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) { const int64_t m = self.size(-2); const int64_t n = self.size(-1); Tensor self_working_copy = cloneBatchedColumnMajor(self); VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdj", [&] { _apply_svd_lib_gesvdj<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv, some); }); } // call cusolver gesvdj batched function to calculate svd template<typename scalar_t> inline static void _apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) { using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto U_data = U.data_ptr<scalar_t>(); auto S_data = S.data_ptr<value_t>(); auto VT_data = VT.data_ptr<scalar_t>(); auto self_stride = matrixStride(self); auto U_stride = matrixStride(U); auto S_stride = S.size(-1); auto VT_stride = matrixStride(VT); int batchsize = cuda_int_cast(batchCount(self), "batch size"); int m = cuda_int_cast(self.size(-2), "m"); int n = cuda_int_cast(self.size(-1), "n"); int lda = std::max<int>(1, m); int ldvt = std::max<int>(1, n); TORCH_INTERNAL_ASSERT(m <= 32 && n <= 32, "gesvdjBatched requires both matrix dimensions not greater than 32, but got " "m = ", m, " n = ", n); // gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU hipsolverGesvdjInfo_t gesvdj_params; TORCH_CUSOLVER_CHECK(hipsolverDnCreateGesvdjInfo(&gesvdj_params)); // TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7)); // TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15)); TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetSortEig(gesvdj_params, 1)); auto handle = at::cuda::getCurrentCUDASolverDnHandle(); auto jobz = compute_uv ? 
HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR; at::cuda::solver::gesvdjBatched<scalar_t>( handle, jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, infos.data_ptr<int>(), gesvdj_params, batchsize ); TORCH_CUSOLVER_CHECK(hipsolverDnDestroyGesvdjInfo(gesvdj_params)); } // wrapper around _apply_svd_lib_gesvdjBatched that handles dtype dispatch, // creates a working copy of the input, and creates V^H from the V returned by gesvdj inline static void apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) { const int64_t m = self.size(-2); const int64_t n = self.size(-1); Tensor self_working_copy = cloneBatchedColumnMajor(self); VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdjBatched", [&] { _apply_svd_lib_gesvdjBatched<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv); }); } // entrance of calculations of `svd` using cusolver gesvdj and gesvdjBatched std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_lib(const Tensor& self, bool some, bool compute_uv) { const int64_t batch_size = batchCount(self); at::Tensor infos = at::zeros({batch_size}, self.options().dtype(at::kInt)); const int64_t m = self.size(-2); const int64_t n = self.size(-1); const int64_t k = ::min(m, n); Tensor U_working_copy, S_working_copy, VT_working_copy; std::tie(U_working_copy, S_working_copy, VT_working_copy) = \ _create_U_S_VT(self, some, compute_uv, /* svd_use_cusolver = */ true); // U, S, V working copies are already column majored now // heuristic for using `gesvdjBatched` over `gesvdj` if (m <= 32 && n <= 32 && batch_size > 1 && (!some || m == n)) { apply_svd_lib_gesvdjBatched(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv); } else { apply_svd_lib_gesvdj(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv, some); } // A device-host sync will be performed. batchCheckErrors(infos, "svd_cuda"); if (!compute_uv) { VT_working_copy.zero_(); U_working_copy.zero_(); } if (some) { VT_working_copy = VT_working_copy.narrow(-2, 0, k); } // so far we have computed VT, but torch.svd returns V instead. Adjust accordingly. VT_working_copy.transpose_(-2, -1); return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy); } // Todo: cusolverDnXpotrfBatched has some numerical issue and is not used here. // A loop of hipsolverDnXpotrf is used in case MAGMA is not linked in the pytorch build. // We will switch to cusolverDnXpotrfBatched after the issue is fixed. // See https://github.com/pytorch/pytorch/issues/53879. template<typename scalar_t> inline static void apply_cholesky_cusolver(const Tensor& self_working_copy, bool upper, const Tensor& infos) { auto handle = at::cuda::getCurrentCUDASolverDnHandle(); const auto uplo = upper ? 
HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; const int64_t n = self_working_copy.size(-1); const int64_t lda = std::max<int64_t>(1, n); const int64_t batch_size = batchCount(self_working_copy); const int64_t matrix_stride = matrixStride(self_working_copy); scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>(); int* infos_ptr = infos.data_ptr<int>(); #ifdef USE_CUSOLVER_64_BIT size_t worksize_device; size_t worksize_host; hipsolverDnParams_t params; hipDataType datatype = at::cuda::solver::get_cusolver_datatype<scalar_t>(); TORCH_CUSOLVER_CHECK(cusolverDnCreateParams(&params)); at::cuda::solver::xpotrf_buffersize(handle, params, uplo, n, datatype, nullptr, lda, datatype, &worksize_device, &worksize_host); // allocate workspace storage auto& device_allocator = *at::cuda::getCUDADeviceAllocator(); auto workdata_device = device_allocator.allocate(worksize_device * batch_size); void* workdata_device_ptr = workdata_device.get(); auto& host_allocator = *at::getCPUAllocator(); auto workdata_host = host_allocator.allocate(worksize_host * batch_size); void* workdata_host_ptr = workdata_host.get(); for (int64_t i = 0; i < batch_size; i++) { at::cuda::solver::xpotrf( handle, params, uplo, n, datatype, self_working_copy_ptr + i * matrix_stride, lda, datatype, (char*)workdata_device_ptr + i * worksize_device, worksize_device, (char*)workdata_host_ptr + i * worksize_host, worksize_host, infos_ptr + i ); } TORCH_CUSOLVER_CHECK(cusolverDnDestroyParams(params)); #else // USE_CUSOLVER_64_BIT int n_32 = cuda_int_cast(n, "n"); int lda_32 = cuda_int_cast(lda, "lda"); int lwork; at::cuda::solver::potrf_buffersize<scalar_t>( handle, uplo, n_32, nullptr, lda_32, &lwork); // allocate workspace storage auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t)*lwork * batch_size); scalar_t* work_data_ptr = static_cast<scalar_t*>(work_data.get()); for (int64_t i = 0; i < batch_size; i++) { at::cuda::solver::potrf<scalar_t>( handle, uplo, n_32, self_working_copy_ptr + i * matrix_stride, lda_32, work_data_ptr + i * lwork, lwork, infos_ptr + i ); } #endif // USE_CUSOLVER_64_BIT } void cholesky_helper_cusolver(const Tensor& input, bool upper, const Tensor& info) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "cholesky_cusolver", [&] { apply_cholesky_cusolver<scalar_t>(input, upper, info); }); } template<typename scalar_t> inline static void apply_cholesky_cusolver_potrs(Tensor& self_working_copy, const Tensor& A_column_major_copy, bool upper, Tensor& infos) { auto handle = at::cuda::getCurrentCUDASolverDnHandle(); const auto uplo = upper ? 
HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; const int64_t n = self_working_copy.size(-2); const int64_t nrhs = self_working_copy.size(-1); const int64_t lda = std::max<int64_t>(1, n); const int64_t batch_size = batchCount(self_working_copy); const int64_t self_matrix_stride = matrixStride(self_working_copy); scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>(); const scalar_t* A_ptr = A_column_major_copy.data_ptr<scalar_t>(); const int64_t A_matrix_stride = matrixStride(A_column_major_copy); const int64_t ldb = std::max<int64_t>(1, A_column_major_copy.size(-1)); int* infos_ptr = infos.data_ptr<int>(); #ifdef USE_CUSOLVER_64_BIT hipsolverDnParams_t params; hipDataType datatype = at::cuda::solver::get_cusolver_datatype<scalar_t>(); TORCH_CUSOLVER_CHECK(cusolverDnCreateParams(&params)); for (int64_t i = 0; i < batch_size; i++) { at::cuda::solver::xpotrs( handle, params, uplo, n, nrhs, datatype, A_ptr + i * A_matrix_stride, lda, datatype, self_working_copy_ptr + i * self_matrix_stride, ldb, infos_ptr ); } TORCH_CUSOLVER_CHECK(cusolverDnDestroyParams(params)); #else // USE_CUSOLVER_64_BIT int n_32 = cuda_int_cast(n, "n"); int nrhs_32 = cuda_int_cast(nrhs, "nrhs"); int lda_32 = cuda_int_cast(lda, "lda"); int ldb_32 = cuda_int_cast(ldb, "ldb"); for (int64_t i = 0; i < batch_size; i++) { at::cuda::solver::potrs<scalar_t>( handle, uplo, n_32, nrhs_32, A_ptr + i * A_matrix_stride, lda_32, self_working_copy_ptr + i * self_matrix_stride, ldb_32, infos_ptr ); } #endif // USE_CUSOLVER_64_BIT } // This code path is only dispatched to if MAGMA is not linked in the pytorch build. // cusolverDn<t>potrsBatched only supports nrhs == 1 template<typename scalar_t> inline static void apply_cholesky_cusolver_potrsBatched(Tensor& self_working_copy, const Tensor& A_column_major_copy, bool upper, Tensor& infos) { auto handle = at::cuda::getCurrentCUDASolverDnHandle(); const auto uplo = upper ? 
HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; const int64_t n = self_working_copy.size(-2); const int64_t nrhs = self_working_copy.size(-1); const int64_t lda = std::max<int64_t>(1, n); const int64_t batch_size = batchCount(self_working_copy); const int64_t self_matrix_stride = matrixStride(self_working_copy); scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>(); const scalar_t* A_ptr = A_column_major_copy.data_ptr<scalar_t>(); const int64_t A_matrix_stride = matrixStride(A_column_major_copy); const int64_t ldb = std::max<int64_t>(1, A_column_major_copy.size(-1)); int* infos_ptr = infos.data_ptr<int>(); auto self_ptr_array = get_device_pointers<scalar_t>(self_working_copy); auto A_ptr_array = get_device_pointers<scalar_t>(A_column_major_copy); at::cuda::solver::potrsBatched( handle, uplo, cuda_int_cast(n, "n"), cuda_int_cast(nrhs, "nrhs"), reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr()), cuda_int_cast(lda, "lda"), reinterpret_cast<scalar_t**>(self_ptr_array.data_ptr()), cuda_int_cast(ldb, "ldb"), infos_ptr, cuda_int_cast(batch_size, "batch_size") ); } Tensor _cholesky_solve_helper_cuda_cusolver(const Tensor& self, const Tensor& A, bool upper) { const int64_t batch_size = batchCount(self); at::Tensor infos = at::zeros({1}, self.options().dtype(at::kInt)); at::Tensor self_working_copy = cloneBatchedColumnMajor(self); at::Tensor A_column_major_copy = cloneBatchedColumnMajor(A); const int64_t nrhs = self_working_copy.size(-1); // cusolverDn<t>potrsBatched only supports nrhs == 1 if (batch_size > 1 && nrhs == 1) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda_potrs_batched", [&] { apply_cholesky_cusolver_potrsBatched<scalar_t>(self_working_copy, A_column_major_copy, upper, infos); }); } else { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda_potrs", [&] { apply_cholesky_cusolver_potrs<scalar_t>(self_working_copy, A_column_major_copy, upper, infos); }); } // info from potrs and potrsBatched only report if the i-th parameter is wrong, not about the matrix singularity, etc. // So we don't need to check it all the time. TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.item().toInt() == 0); return self_working_copy; } void _cholesky_inverse_cusolver_potrs_based(Tensor& result, Tensor& infos, bool upper) { at::Tensor input_working_copy = cloneBatchedColumnMajor(result); at::Tensor infos_gpu = at::zeros({1}, result.options().dtype(at::kInt)); result.fill_(0); result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_cuda_potri", [&] { apply_cholesky_cusolver_potrs<scalar_t>(result, input_working_copy, upper, infos_gpu); }); // Debug only: info of cusolver potrs only check if the i-th parameter is wrong // Function argument `infos` is a CPU tensor, the following copy will cause a device-host sync. // infos.copy_(infos_gpu); } Tensor& cholesky_inverse_kernel_impl_cusolver(Tensor &result, Tensor& infos, bool upper) { _cholesky_inverse_cusolver_potrs_based(result, infos, upper); return result; } /* The geqrf function computes the QR decomposition of a m x n matrix A. 
Args: * `A` - [in] Tensor with matrices for QR decomposition, [out] Tensor containing R in the upper triangle of A and elementary reflectors below the main diagonal of A * `tau` - Tensor containing the magnitudes of the elementary reflectors * `m` - The number of rows of `input` to consider * `n` - The number of columns of `input` to consider (actual sizes of `input` could be larger) For further details, please see the cuSOLVER documentation for GEQRF. */ template <typename scalar_t> static void apply_geqrf(const Tensor& A, const Tensor& tau) { int64_t m = A.size(-2); int64_t n = A.size(-1); int64_t lda = std::max<int64_t>(1, m); int64_t batch_size = batchCount(A); auto A_stride = matrixStride(A); auto tau_stride = tau.size(-1); auto A_data = A.data_ptr<scalar_t>(); auto tau_data = tau.data_ptr<scalar_t>(); auto infos = at::zeros({1}, A.options().dtype(at::kInt)); auto infos_data = infos.data_ptr<int>(); // get the optimal work size and allocate workspace tensor #ifdef USE_CUSOLVER_64_BIT size_t worksize_device; // workspaceInBytesOnDevice size_t worksize_host; // workspaceInBytesOnHost hipsolverDnParams_t params = NULL; // use default algorithm (currently it's the only option) at::cuda::solver::xgeqrf_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), params, m, n, A_data, lda, tau_data, &worksize_device, &worksize_host); #else int lwork; int m_32 = cuda_int_cast(m, "m"); int n_32 = cuda_int_cast(n, "n"); int lda_32 = cuda_int_cast(lda, "lda"); at::cuda::solver::geqrf_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), m_32, n_32, A_data, lda_32, &lwork); #endif // USE_CUSOLVER_64_BIT for (decltype(batch_size) i = 0; i < batch_size; i++) { scalar_t* A_working_ptr = &A_data[i * A_stride]; scalar_t* tau_working_ptr = &tau_data[i * tau_stride]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); #ifdef USE_CUSOLVER_64_BIT // allocate workspace storage on device and host auto& device_allocator = *at::cuda::getCUDADeviceAllocator(); auto work_device_data = device_allocator.allocate(worksize_device); auto& host_allocator = *at::getCPUAllocator(); auto work_host_data = host_allocator.allocate(worksize_host); at::cuda::solver::xgeqrf<scalar_t>( handle, params, m, n, A_working_ptr, lda, tau_working_ptr, static_cast<scalar_t*>(work_device_data.get()), worksize_device, static_cast<scalar_t*>(work_host_data.get()), worksize_host, infos_data); #else // allocate workspace storage on device auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t) * std::max<int>(1, lwork)); at::cuda::solver::geqrf<scalar_t>( handle, m_32, n_32, A_working_ptr, lda_32, tau_working_ptr, static_cast<scalar_t*>(work_data.get()), lwork, infos_data); #endif // USE_CUSOLVER_64_BIT } // info from geqrf only reports if the i-th parameter is wrong, not about the matrix singularity // so we don't need to check it all the time TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.item().toInt() == 0); } // This is a type dispatching helper function for 'apply_geqrf' void geqrf_cusolver(const Tensor& input, const Tensor& tau) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_cuda", [&]{ apply_geqrf<scalar_t>(input, tau); }); } /* The ormqr function multiplies Q with another matrix from a sequence of elementary reflectors, such as is produced by the geqrf function. Args: * `input` - Tensor with elementary reflectors below the diagonal, encoding the matrix Q. * `tau` - Tensor containing the magnitudes of the elementary reflectors. 
* `other` - [in] Tensor containing the matrix to be multiplied. [out] result of the matrix multiplication with Q. * `left` - bool, determining whether `other` is left- or right-multiplied with Q. * `transpose` - bool, determining whether to transpose (or conjugate transpose) Q before multiplying. For further details, please see the cuSOLVER documentation for ORMQR and UNMQR. */ template <typename scalar_t> static void apply_ormqr(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) { using value_t = typename c10::scalar_value_type<scalar_t>::type; auto side = left ? HIPBLAS_SIDE_LEFT : HIPBLAS_SIDE_RIGHT; auto trans = transpose ? (input.is_complex() ? HIPBLAS_OP_C : HIPBLAS_OP_T) : HIPBLAS_OP_N; auto input_data = input.data_ptr<scalar_t>(); auto tau_data = tau.data_ptr<scalar_t>(); auto other_data = other.data_ptr<scalar_t>(); auto input_matrix_stride = matrixStride(input); auto other_matrix_stride = matrixStride(other); auto tau_stride = tau.size(-1); auto batch_size = batchCount(input); auto m = cuda_int_cast(other.size(-2), "m"); auto n = cuda_int_cast(other.size(-1), "n"); auto k = cuda_int_cast(tau.size(-1), "k"); auto lda = std::max<int>(1, left ? m : n); auto ldc = std::max<int>(1, m); // get the optimal work size and allocate workspace tensor int lwork; at::cuda::solver::ormqr_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), side, trans, m, n, k, input_data, lda, tau_data, other_data, ldc, &lwork); auto info = at::zeros({1}, input.options().dtype(at::kInt)); auto info_data = info.data_ptr<int>(); for (auto i = decltype(batch_size){0}; i < batch_size; i++) { scalar_t* input_working_ptr = &input_data[i * input_matrix_stride]; scalar_t* other_working_ptr = &other_data[i * other_matrix_stride]; scalar_t* tau_working_ptr = &tau_data[i * tau_stride]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); // allocate workspace storage auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t)*lwork); at::cuda::solver::ormqr<scalar_t>( handle, side, trans, m, n, k, input_working_ptr, lda, tau_working_ptr, other_working_ptr, ldc, static_cast<scalar_t*>(work_data.get()), lwork, info_data ); // info from ormqr only reports if the i-th parameter is wrong // so we don't need to check it all the time TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0); } } // This is a type dispatching helper function for 'apply_ormqr' void ormqr_cusolver(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "orgmr_cuda", [&]{ apply_ormqr<scalar_t>(input, tau, other, left, transpose); }); } /* The orgqr function allows reconstruction of an orthogonal (or unitary) matrix Q, from a sequence of elementary reflectors, such as produced by the geqrf function. Args: * `self` - Tensor with the directions of the elementary reflectors below the diagonal, it will be overwritten with the result * `tau` - Tensor containing the magnitudes of the elementary reflectors For further details, please see the cuSOLVER documentation for ORGQR and UNGQR. 
*/ template <typename scalar_t> inline static void apply_orgqr(Tensor& self, const Tensor& tau) { using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto tau_data = tau.data_ptr<scalar_t>(); auto self_matrix_stride = matrixStride(self); auto batchsize = cuda_int_cast(batchCount(self), "batch size"); auto m = cuda_int_cast(self.size(-2), "m"); auto n = cuda_int_cast(self.size(-1), "n"); auto k = cuda_int_cast(tau.size(-1), "k"); auto tau_stride = std::max<int>(1, k); auto lda = std::max<int>(1, m); // LAPACK's requirement TORCH_INTERNAL_ASSERT(m >= n); TORCH_INTERNAL_ASSERT(n >= k); // cuSOLVER doesn't compute anything for this case, which is wrong // the result should be a matrix with 1 on the diagonal if (k == 0) { self.fill_(0); self.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1); return; } // get the optimal work size and allocate workspace tensor int lwork; at::cuda::solver::orgqr_buffersize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), m, n, k, self_data, lda, tau_data, &lwork); auto info = at::zeros({1}, self.options().dtype(at::kInt)); auto info_data = info.data_ptr<int>(); for (auto i = decltype(batchsize){0}; i < batchsize; i++) { scalar_t* self_working_ptr = &self_data[i * self_matrix_stride]; scalar_t* tau_working_ptr = &tau_data[i * tau_stride]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); // allocate workspace storage auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t)*lwork); at::cuda::solver::orgqr<scalar_t>( handle, m, n, k, self_working_ptr, lda, tau_working_ptr, static_cast<scalar_t*>(work_data.get()), lwork, info_data ); // info from orgqr only reports if the i-th parameter is wrong // so we don't need to check it all the time TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0); } } // This is a type dispatching helper function for 'apply_orgqr' Tensor& orgqr_helper_cusolver(Tensor& result, const Tensor& tau) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "orgqr_cuda", [&]{ apply_orgqr<scalar_t>(result, tau); }); return result; } template <typename scalar_t> static void apply_syevd(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) { using value_t = typename c10::scalar_value_type<scalar_t>::type; hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; hipsolverEigMode_t jobz = compute_eigenvectors ? 
HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR; int64_t n = vectors.size(-1); int64_t lda = std::max<int64_t>(1, n); int64_t batch_size = batchCount(vectors); auto vectors_stride = matrixStride(vectors); auto values_stride = values.size(-1); auto vectors_data = vectors.data_ptr<scalar_t>(); auto values_data = values.data_ptr<value_t>(); auto infos_data = infos.data_ptr<int>(); // get the optimal work size and allocate workspace tensor #ifdef USE_CUSOLVER_64_BIT size_t worksize_device; // workspaceInBytesOnDevice size_t worksize_host; // workspaceInBytesOnHost hipsolverDnParams_t params = NULL; // use default algorithm (currently it's the only option) at::cuda::solver::xsyevd_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), params, jobz, uplo, n, vectors_data, lda, values_data, &worksize_device, &worksize_host); #else int lwork; int n_32 = cuda_int_cast(n, "n"); int lda_32 = cuda_int_cast(lda, "lda"); at::cuda::solver::syevd_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), jobz, uplo, n_32, vectors_data, lda_32, values_data, &lwork); #endif // USE_CUSOLVER_64_BIT for (decltype(batch_size) i = 0; i < batch_size; i++) { scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride]; value_t* values_working_ptr = &values_data[i * values_stride]; int* info_working_ptr = &infos_data[i]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); #ifdef USE_CUSOLVER_64_BIT // allocate workspace storage on device and host auto& device_allocator = *at::cuda::getCUDADeviceAllocator(); auto work_device_data = device_allocator.allocate(worksize_device); auto& host_allocator = *at::getCPUAllocator(); auto work_host_data = host_allocator.allocate(worksize_host); at::cuda::solver::xsyevd<scalar_t>( handle, params, jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr, static_cast<scalar_t*>(work_device_data.get()), worksize_device, static_cast<scalar_t*>(work_host_data.get()), worksize_host, info_working_ptr); #else // allocate workspace storage on device auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t) * lwork); at::cuda::solver::syevd<scalar_t>( handle, jobz, uplo, n_32, vectors_working_ptr, lda_32, values_working_ptr, static_cast<scalar_t*>(work_data.get()), lwork, info_working_ptr); #endif // USE_CUSOLVER_64_BIT } } template <typename scalar_t> static void apply_syevj(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) { using value_t = typename c10::scalar_value_type<scalar_t>::type; hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; hipsolverEigMode_t jobz = compute_eigenvectors ? 
HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR; int n = cuda_int_cast(vectors.size(-1), "n"); int lda = std::max<int>(1, n); auto batch_size = batchCount(vectors); auto vectors_stride = matrixStride(vectors); auto values_stride = values.size(-1); auto vectors_data = vectors.data_ptr<scalar_t>(); auto values_data = values.data_ptr<value_t>(); auto infos_data = infos.data_ptr<int>(); // syevj_params controls the numerical accuracy of syevj // by default the tolerance is set to machine accuracy // the maximum number of iteration of Jacobi method by default is 100 // cuSOLVER documentations says: "15 sweeps are good enough to converge to machine accuracy" // LAPACK has SVD routine based on similar Jacobi algorithm (gesvj) and there a maximum of 30 iterations is set // Let's use the default values for now hipsolverSyevjInfo_t syevj_params; TORCH_CUSOLVER_CHECK(hipsolverDnCreateSyevjInfo(&syevj_params)); // get the optimal work size and allocate workspace tensor int lwork; at::cuda::solver::syevj_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), jobz, uplo, n, vectors_data, lda, values_data, &lwork, syevj_params); for (decltype(batch_size) i = 0; i < batch_size; i++) { scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride]; value_t* values_working_ptr = &values_data[i * values_stride]; int* info_working_ptr = &infos_data[i]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); // allocate workspace storage on device auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t) * lwork); at::cuda::solver::syevj<scalar_t>( handle, jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr, static_cast<scalar_t*>(work_data.get()), lwork, info_working_ptr, syevj_params); } TORCH_CUSOLVER_CHECK(hipsolverDnDestroySyevjInfo(syevj_params)); } template <typename scalar_t> static void apply_syevj_batched(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) { using value_t = typename c10::scalar_value_type<scalar_t>::type; hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; hipsolverEigMode_t jobz = compute_eigenvectors ? 
HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR; int n = cuda_int_cast(vectors.size(-1), "n"); int lda = std::max<int>(1, n); int batch_size = cuda_int_cast(batchCount(vectors), "batch_size"); auto vectors_data = vectors.data_ptr<scalar_t>(); auto values_data = values.data_ptr<value_t>(); auto infos_data = infos.data_ptr<int>(); // syevj_params controls the numerical accuracy of syevj // by default the tolerance is set to machine accuracy // the maximum number of iteration of Jacobi method by default is 100 // cuSOLVER documentations says: "15 sweeps are good enough to converge to machine accuracy" // LAPACK has SVD routine based on similar Jacobi algorithm (gesvj) and there a maximum of 30 iterations is set // Let's use the default values for now hipsolverSyevjInfo_t syevj_params; TORCH_CUSOLVER_CHECK(hipsolverDnCreateSyevjInfo(&syevj_params)); TORCH_CUSOLVER_CHECK(hipsolverDnXsyevjSetSortEig(syevj_params, 1)); auto handle = at::cuda::getCurrentCUDASolverDnHandle(); // get the optimal work size and allocate workspace tensor int lwork; at::cuda::solver::syevjBatched_bufferSize<scalar_t>( handle, jobz, uplo, n, vectors_data, lda, values_data, &lwork, syevj_params, batch_size); // allocate workspace storage on device auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t) * lwork); at::cuda::solver::syevjBatched<scalar_t>( handle, jobz, uplo, n, vectors_data, lda, values_data, static_cast<scalar_t*>(work_data.get()), lwork, infos_data, syevj_params, batch_size); TORCH_CUSOLVER_CHECK(hipsolverDnDestroySyevjInfo(syevj_params)); } static void linalg_eigh_cusolver_syevd(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(eigenvectors.scalar_type(), "linalg_eigh_cuda", [&] { apply_syevd<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); }); } static void linalg_eigh_cusolver_syevj(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(eigenvectors.scalar_type(), "linalg_eigh_cuda", [&] { apply_syevj<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); }); } void linalg_eigh_cusolver(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) { // TODO: syevj_batched should be added here, but at least for CUDA 11.2 it contains a bug leading to incorrect results // See https://github.com/pytorch/pytorch/pull/53040#issuecomment-793626268 and https://github.com/cupy/cupy/issues/4847 // syevj is better than syevd for float32 dtype and matrix sizes 32x32 - 512x512 // See https://github.com/pytorch/pytorch/pull/53040#issuecomment-788264724 if (eigenvectors.scalar_type() == at::kFloat && eigenvectors.size(-1) >= 32 && eigenvectors.size(-1) <= 512) { return linalg_eigh_cusolver_syevj(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); } else { return linalg_eigh_cusolver_syevd(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); } } #endif // USE_CUSOLVER }} // namespace at::native
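The batched cuBLAS/cuSOLVER paths above expect a "device array of device pointers", which `get_device_pointers` builds with `at::arange` over raw pointer values to avoid a host round-trip. As a plain-CUDA illustration of the same layout (a sketch only, not part of either file; it stages the pointers on the host, and all names and sizes are assumptions):

#include <cuda_runtime.h>
#include <cstdint>
#include <vector>

// Build a device array of pointers, one per matrix of a batch stored
// contiguously on the device with `mat_stride` elements between matrices.
// The caller owns the returned buffer and releases it with cudaFree.
float **make_device_pointer_array(float *d_batch, int batch_size, std::int64_t mat_stride) {
  std::vector<float *> h_ptrs(batch_size);
  for (int i = 0; i < batch_size; ++i) {
    h_ptrs[i] = d_batch + i * mat_stride; // pointer to the i-th matrix in the batch
  }
  float **d_ptrs = nullptr;
  cudaMalloc(reinterpret_cast<void **>(&d_ptrs), batch_size * sizeof(float *));
  cudaMemcpy(d_ptrs, h_ptrs.data(), batch_size * sizeof(float *), cudaMemcpyHostToDevice);
  return d_ptrs; // pass to, e.g., a *Batched cuBLAS routine
}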
e5554e08038f0d5baec28162d0844bb99fe343ea.cu
#include <ATen/Context.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/PinnedMemoryAllocator.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/CUDASolver.h> #include <ATen/cuda/CUDABlas.h> #include <ATen/cuda/CUDAEvent.h> #include <c10/cuda/CUDAStream.h> #include <ATen/native/LinearAlgebraUtils.h> #include <ATen/native/cuda/MiscUtils.h> #include <ATen/native/cuda/BatchLinearAlgebraLib.h> namespace at { namespace native { // Some cuBLAS and cuSOLVER batched routines require input to be a device array of pointers to device individual matrices // 'input' must be a contiguous tensor template <typename scalar_t> static Tensor get_device_pointers(const Tensor& input) { auto input_data = input.data_ptr<scalar_t>(); int64_t input_mat_stride = matrixStride(input); // cublas/cusolver interface requires 'int' int batch_size = cuda_int_cast(batchCount(input), "batch_size"); // if batch_size==0, then start=0 and end=0 // if input_mat_stride==0, then step=sizeof(scalar_t) return at::arange( /*start=*/reinterpret_cast<int64_t>(input_data), /*end=*/reinterpret_cast<int64_t>(input_data + batch_size * input_mat_stride), /*step=*/static_cast<int64_t>(std::max<int64_t>(input_mat_stride, 1) * sizeof(scalar_t)), input.options().dtype(at::kLong)); } template <typename scalar_t> void apply_geqrf_batched(const Tensor& input, const Tensor& tau) { // AMD ROCm backend is implemented via rewriting all CUDA calls to HIP // rocBLAS does not implement BLAS-like extensions of cuBLAS, they're in rocSOLVER // rocSOLVER is currently not used in ATen, therefore we raise an error in this case #ifndef CUDART_VERSION TORCH_CHECK(false, "geqrf: Batched version is supported only with cuBLAS backend.") #else auto batch_size = cuda_int_cast(batchCount(input), "batch_size"); auto m = cuda_int_cast(input.size(-2), "m"); auto n = cuda_int_cast(input.size(-1), "n"); auto lda = std::max<int>(1, m); // cuBLAS batched geqrf requires input to be the device array of pointers to device single matrices Tensor input_ptr_array = get_device_pointers<scalar_t>(input); Tensor tau_ptr_array = get_device_pointers<scalar_t>(tau.unsqueeze(-1)); auto input_ptr_array_data = reinterpret_cast<scalar_t**>(input_ptr_array.data_ptr()); auto tau_ptr_array_data = reinterpret_cast<scalar_t**>(tau_ptr_array.data_ptr()); int info; auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::geqrfBatched(handle, m, n, input_ptr_array_data, lda, tau_ptr_array_data, &info, batch_size); // info only indicates wrong arguments to geqrfBatched call // info is a host variable, we can check it without device synchronization TORCH_INTERNAL_ASSERT(info == 0); #endif } void geqrf_batched_cublas(const Tensor& input, const Tensor& tau) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_batched_cuda", [&]{ apply_geqrf_batched<scalar_t>(input, tau); }); } template <typename scalar_t> static void apply_triangular_solve(Tensor& A, Tensor& B, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; cublasOperation_t trans = transpose ? CUBLAS_OP_T : CUBLAS_OP_N; trans = conjugate_transpose ? CUBLAS_OP_C : trans; cublasDiagType_t diag = unitriangular ? 
CUBLAS_DIAG_UNIT : CUBLAS_DIAG_NON_UNIT; cublasSideMode_t side = CUBLAS_SIDE_LEFT; auto A_data = A.data_ptr<scalar_t>(); auto B_data = B.data_ptr<scalar_t>(); auto A_mat_stride = matrixStride(A); auto B_mat_stride = matrixStride(B); auto batch_size = batchCount(A); auto n = cuda_int_cast(A.size(-2), "n"); auto nrhs = cuda_int_cast(B.size(-1), "nrhs"); auto lda = std::max<int>(1, n); auto alpha = scalar_t{1}; for (decltype(batch_size) i = 0; i < batch_size; i++) { scalar_t* A_working_ptr = &A_data[i * A_mat_stride]; scalar_t* B_working_ptr = &B_data[i * B_mat_stride]; auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::trsm(handle, side, uplo, trans, diag, n, nrhs, &alpha, A_working_ptr, lda, B_working_ptr, lda); } } void triangular_solve_cublas(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { (void)infos; // unused AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{ apply_triangular_solve<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular); }); } template <typename scalar_t> static void apply_triangular_solve_batched(Tensor& A, Tensor& B, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; cublasOperation_t trans = transpose ? CUBLAS_OP_T : CUBLAS_OP_N; trans = conjugate_transpose ? CUBLAS_OP_C : trans; cublasDiagType_t diag = unitriangular ? CUBLAS_DIAG_UNIT : CUBLAS_DIAG_NON_UNIT; cublasSideMode_t side = CUBLAS_SIDE_LEFT; auto A_data = A.data_ptr<scalar_t>(); auto B_data = B.data_ptr<scalar_t>(); auto A_mat_stride = matrixStride(A); auto B_mat_stride = matrixStride(B); auto batch_size = cuda_int_cast(batchCount(A), "batch_size"); auto n = cuda_int_cast(A.size(-2), "n"); auto nrhs = cuda_int_cast(B.size(-1), "nrhs"); auto lda = std::max<int>(1, n); auto alpha = scalar_t{1}; // cuBLAS batched trsm requires input to be the device array of pointers to device single matrices Tensor A_ptr_array = get_device_pointers<scalar_t>(A); Tensor B_ptr_array = get_device_pointers<scalar_t>(B); auto A_ptr_array_data = reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr()); auto B_ptr_array_data = reinterpret_cast<scalar_t**>(B_ptr_array.data_ptr()); auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::trsmBatched(handle, side, uplo, trans, diag, n, nrhs, &alpha, A_ptr_array_data, lda, B_ptr_array_data, lda, batch_size); } void triangular_solve_batched_cublas(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { (void)infos; // unused AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{ apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular); }); } #ifdef USE_CUSOLVER inline static Tensor column_major_identity_matrix_like(const Tensor& self) { auto size = self.sizes(); auto size_slice = IntArrayRef(size.data(), size.size()-1); return at::ones(size_slice, self.options()).diag_embed().transpose(-2, -1); } template <typename scalar_t> inline static void _apply_single_inverse_helper(scalar_t* self_ptr, scalar_t* self_inv_ptr, int* ipiv_ptr, int* info_getrf_ptr, int* info_getrs_ptr, int n, int lda) { // self_inv_ptr should already be an identity matrix auto handle = at::cuda::getCurrentCUDASolverDnHandle(); at::cuda::solver::getrf<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, info_getrf_ptr); 
at::cuda::solver::getrs<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, self_inv_ptr, lda, info_getrs_ptr); } template <typename scalar_t> static void apply_batched_inverse_lib(Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) { const int batch_size = cuda_int_cast(batchCount(self), "batchCount"); const int n = cuda_int_cast(self.size(-2), "self.size(-2)"); const int lda = std::max<int>(1, n); auto self_data = self.data_ptr<scalar_t>(); auto self_mat_stride = matrixStride(self); auto self_inv_data = self_inv.data_ptr<scalar_t>(); auto self_inv_mat_stride = matrixStride(self_inv); auto infos_getrf_data = infos_getrf.data_ptr<int>(); auto infos_getrs_data = infos_getrs.data_ptr<int>(); auto& allocator = *::c10::cuda::CUDACachingAllocator::get(); // Heuristic: For small batch size or large matrix size, we use for-loop to iterate over the batches instead of // calling the batched cublas routine. if (batch_size <= 8 || /* batch_size > 8 && */ n >= 512) { for (int64_t i = 0; i < batch_size; i++) { auto dataPtr = allocator.allocate(sizeof(int) * lda); int* pivot = reinterpret_cast<int*>(dataPtr.get()); int* infos_getrf_working_ptr = &infos_getrf_data[i]; int* infos_getrs_working_ptr = &infos_getrs_data[i]; _apply_single_inverse_helper<scalar_t>( &self_data[i * self_mat_stride], &self_inv_data[i * self_inv_mat_stride], pivot, infos_getrf_working_ptr, infos_getrs_working_ptr, n, lda); } } else { // cublas batched kernels require input be "device array of device pointers" Tensor self_array = at::arange( reinterpret_cast<int64_t>(self_data), reinterpret_cast<int64_t>(&self_data[(batch_size-1) * self_mat_stride]) + 1, static_cast<int64_t>(self_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong)); Tensor self_inv_array = at::arange( reinterpret_cast<int64_t>(self_inv_data), reinterpret_cast<int64_t>(&self_inv_data[(batch_size-1) * self_inv_mat_stride]) + 1, static_cast<int64_t>(self_inv_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong)); auto dataPtr = allocator.allocate(sizeof(int)*batch_size*lda); int* ipiv_array = reinterpret_cast<int*>(dataPtr.get()); at::cuda::blas::getrfBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda, ipiv_array, infos_getrf_data, batch_size); at::cuda::blas::getriBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda, ipiv_array, reinterpret_cast<scalar_t**>(self_inv_array.data_ptr()), lda, infos_getrs_data, batch_size); } } template <typename scalar_t> static void apply_single_inverse_lib(const Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) { int n = cuda_int_cast(self.size(-2), "self.size(-2)"); int lda = std::max<int>(1, n); Tensor ipiv = at::empty({lda}, self.options().dtype(at::kInt)); _apply_single_inverse_helper<scalar_t>( self.data_ptr<scalar_t>(), self_inv.data_ptr<scalar_t>(), ipiv.data_ptr<int>(), infos_getrf.data_ptr<int>(), infos_getrs.data_ptr<int>(), n, lda); } // This is a type dispatching helper function for 'apply_batched_inverse_lib' and 'apply_single_inverse_lib' Tensor& _linalg_inv_out_helper_cuda_lib(Tensor& result, Tensor& infos_getrf, Tensor& infos_getrs) { // assuming result is in column major order and contains the matrices to invert Tensor input_working_copy = cloneBatchedColumnMajor(result); // for getrf + getrs (cusolver path) // result should be filled with identity matrices result.zero_(); result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1); const int batch_size = cuda_int_cast(batchCount(result), 
"batchCount"); if (result.dim() > 2) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{ apply_batched_inverse_lib<scalar_t>( input_working_copy, result, infos_getrf, infos_getrs); }); } else { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{ apply_single_inverse_lib<scalar_t>(input_working_copy, result, infos_getrf, infos_getrs); }); } return result; } // entrance of calculations of `inverse` using cusolver getrf + getrs, cublas getrfBatched + getriBatched Tensor _inverse_helper_cuda_lib(const Tensor& self) { Tensor self_working_copy = cloneBatchedColumnMajor(self); Tensor self_inv_working_copy = column_major_identity_matrix_like(self_working_copy); const int batch_size = cuda_int_cast(batchCount(self), "batchCount"); if (self.dim() > 2 && batch_size > 1) { Tensor infos_getrf = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt)); Tensor infos_getrs = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_batched_inverse_lib<scalar_t>( self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs); }); batchCheckErrors(infos_getrf, "inverse_cuda"); batchCheckErrors(infos_getrs, "inverse_cuda"); } else { Tensor infos_getrf = at::zeros({1}, self.options().dtype(kInt)); Tensor infos_getrs = at::zeros({1}, self.options().dtype(kInt)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_single_inverse_lib<scalar_t>(self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs); }); batchCheckErrors(infos_getrf, "inverse_cuda"); batchCheckErrors(infos_getrs, "inverse_cuda"); } return self_inv_working_copy; } // call cusolver gesvdj function to calculate svd template<typename scalar_t> inline static void _apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) { using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto U_data = U.data_ptr<scalar_t>(); auto S_data = S.data_ptr<value_t>(); auto VT_data = VT.data_ptr<scalar_t>(); auto self_stride = matrixStride(self); auto U_stride = matrixStride(U); auto S_stride = S.size(-1); auto VT_stride = matrixStride(VT); int batchsize = cuda_int_cast(batchCount(self), "batch size"); int m = cuda_int_cast(self.size(-2), "m"); int n = cuda_int_cast(self.size(-1), "n"); int lda = std::max<int>(1, m); int ldvt = std::max<int>(1, n); for(int i = 0; i < batchsize; i++){ // gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU gesvdjInfo_t gesvdj_params; TORCH_CUSOLVER_CHECK(cusolverDnCreateGesvdjInfo(&gesvdj_params)); // TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7)); // TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15)); auto handle = at::cuda::getCurrentCUDASolverDnHandle(); auto jobz = compute_uv ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR; at::cuda::solver::gesvdj<scalar_t>( handle, jobz, /*econ=*/ some ? 
1 : 0, m, n, self_data + i * self_stride, lda, S_data + i * S_stride, U_data + i * U_stride, lda, VT_data + i * VT_stride, ldvt, infos.data_ptr<int>() + i, gesvdj_params ); TORCH_CUSOLVER_CHECK(cusolverDnDestroyGesvdjInfo(gesvdj_params)); } } // wrapper around _apply_svd_lib_gesvdj that handles dtype dispatch, // creates a working copy of the input, and creates V^H from the V returned by gesvdj inline static void apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) { const int64_t m = self.size(-2); const int64_t n = self.size(-1); Tensor self_working_copy = cloneBatchedColumnMajor(self); VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdj", [&] { _apply_svd_lib_gesvdj<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv, some); }); } // call cusolver gesvdj batched function to calculate svd template<typename scalar_t> inline static void _apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) { using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto U_data = U.data_ptr<scalar_t>(); auto S_data = S.data_ptr<value_t>(); auto VT_data = VT.data_ptr<scalar_t>(); auto self_stride = matrixStride(self); auto U_stride = matrixStride(U); auto S_stride = S.size(-1); auto VT_stride = matrixStride(VT); int batchsize = cuda_int_cast(batchCount(self), "batch size"); int m = cuda_int_cast(self.size(-2), "m"); int n = cuda_int_cast(self.size(-1), "n"); int lda = std::max<int>(1, m); int ldvt = std::max<int>(1, n); TORCH_INTERNAL_ASSERT(m <= 32 && n <= 32, "gesvdjBatched requires both matrix dimensions not greater than 32, but got " "m = ", m, " n = ", n); // gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU gesvdjInfo_t gesvdj_params; TORCH_CUSOLVER_CHECK(cusolverDnCreateGesvdjInfo(&gesvdj_params)); // TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7)); // TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15)); TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetSortEig(gesvdj_params, 1)); auto handle = at::cuda::getCurrentCUDASolverDnHandle(); auto jobz = compute_uv ? 
CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR; at::cuda::solver::gesvdjBatched<scalar_t>( handle, jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, infos.data_ptr<int>(), gesvdj_params, batchsize ); TORCH_CUSOLVER_CHECK(cusolverDnDestroyGesvdjInfo(gesvdj_params)); } // wrapper around _apply_svd_lib_gesvdjBatched that handles dtype dispatch, // creates a working copy of the input, and creates V^H from the V returned by gesvdj inline static void apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) { const int64_t m = self.size(-2); const int64_t n = self.size(-1); Tensor self_working_copy = cloneBatchedColumnMajor(self); VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdjBatched", [&] { _apply_svd_lib_gesvdjBatched<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv); }); } // entrance of calculations of `svd` using cusolver gesvdj and gesvdjBatched std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_lib(const Tensor& self, bool some, bool compute_uv) { const int64_t batch_size = batchCount(self); at::Tensor infos = at::zeros({batch_size}, self.options().dtype(at::kInt)); const int64_t m = self.size(-2); const int64_t n = self.size(-1); const int64_t k = std::min(m, n); Tensor U_working_copy, S_working_copy, VT_working_copy; std::tie(U_working_copy, S_working_copy, VT_working_copy) = \ _create_U_S_VT(self, some, compute_uv, /* svd_use_cusolver = */ true); // U, S, V working copies are already column majored now // heuristic for using `gesvdjBatched` over `gesvdj` if (m <= 32 && n <= 32 && batch_size > 1 && (!some || m == n)) { apply_svd_lib_gesvdjBatched(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv); } else { apply_svd_lib_gesvdj(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv, some); } // A device-host sync will be performed. batchCheckErrors(infos, "svd_cuda"); if (!compute_uv) { VT_working_copy.zero_(); U_working_copy.zero_(); } if (some) { VT_working_copy = VT_working_copy.narrow(-2, 0, k); } // so far we have computed VT, but torch.svd returns V instead. Adjust accordingly. VT_working_copy.transpose_(-2, -1); return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy); } // Todo: cusolverDnXpotrfBatched has some numerical issue and is not used here. // A loop of cusolverDnXpotrf is used in case MAGMA is not linked in the pytorch build. // We will switch to cusolverDnXpotrfBatched after the issue is fixed. // See https://github.com/pytorch/pytorch/issues/53879. template<typename scalar_t> inline static void apply_cholesky_cusolver(const Tensor& self_working_copy, bool upper, const Tensor& infos) { auto handle = at::cuda::getCurrentCUDASolverDnHandle(); const auto uplo = upper ? 
CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; const int64_t n = self_working_copy.size(-1); const int64_t lda = std::max<int64_t>(1, n); const int64_t batch_size = batchCount(self_working_copy); const int64_t matrix_stride = matrixStride(self_working_copy); scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>(); int* infos_ptr = infos.data_ptr<int>(); #ifdef USE_CUSOLVER_64_BIT size_t worksize_device; size_t worksize_host; cusolverDnParams_t params; cudaDataType datatype = at::cuda::solver::get_cusolver_datatype<scalar_t>(); TORCH_CUSOLVER_CHECK(cusolverDnCreateParams(&params)); at::cuda::solver::xpotrf_buffersize(handle, params, uplo, n, datatype, nullptr, lda, datatype, &worksize_device, &worksize_host); // allocate workspace storage auto& device_allocator = *at::cuda::getCUDADeviceAllocator(); auto workdata_device = device_allocator.allocate(worksize_device * batch_size); void* workdata_device_ptr = workdata_device.get(); auto& host_allocator = *at::getCPUAllocator(); auto workdata_host = host_allocator.allocate(worksize_host * batch_size); void* workdata_host_ptr = workdata_host.get(); for (int64_t i = 0; i < batch_size; i++) { at::cuda::solver::xpotrf( handle, params, uplo, n, datatype, self_working_copy_ptr + i * matrix_stride, lda, datatype, (char*)workdata_device_ptr + i * worksize_device, worksize_device, (char*)workdata_host_ptr + i * worksize_host, worksize_host, infos_ptr + i ); } TORCH_CUSOLVER_CHECK(cusolverDnDestroyParams(params)); #else // USE_CUSOLVER_64_BIT int n_32 = cuda_int_cast(n, "n"); int lda_32 = cuda_int_cast(lda, "lda"); int lwork; at::cuda::solver::potrf_buffersize<scalar_t>( handle, uplo, n_32, nullptr, lda_32, &lwork); // allocate workspace storage auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t)*lwork * batch_size); scalar_t* work_data_ptr = static_cast<scalar_t*>(work_data.get()); for (int64_t i = 0; i < batch_size; i++) { at::cuda::solver::potrf<scalar_t>( handle, uplo, n_32, self_working_copy_ptr + i * matrix_stride, lda_32, work_data_ptr + i * lwork, lwork, infos_ptr + i ); } #endif // USE_CUSOLVER_64_BIT } void cholesky_helper_cusolver(const Tensor& input, bool upper, const Tensor& info) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "cholesky_cusolver", [&] { apply_cholesky_cusolver<scalar_t>(input, upper, info); }); } template<typename scalar_t> inline static void apply_cholesky_cusolver_potrs(Tensor& self_working_copy, const Tensor& A_column_major_copy, bool upper, Tensor& infos) { auto handle = at::cuda::getCurrentCUDASolverDnHandle(); const auto uplo = upper ? 
CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; const int64_t n = self_working_copy.size(-2); const int64_t nrhs = self_working_copy.size(-1); const int64_t lda = std::max<int64_t>(1, n); const int64_t batch_size = batchCount(self_working_copy); const int64_t self_matrix_stride = matrixStride(self_working_copy); scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>(); const scalar_t* A_ptr = A_column_major_copy.data_ptr<scalar_t>(); const int64_t A_matrix_stride = matrixStride(A_column_major_copy); const int64_t ldb = std::max<int64_t>(1, A_column_major_copy.size(-1)); int* infos_ptr = infos.data_ptr<int>(); #ifdef USE_CUSOLVER_64_BIT cusolverDnParams_t params; cudaDataType datatype = at::cuda::solver::get_cusolver_datatype<scalar_t>(); TORCH_CUSOLVER_CHECK(cusolverDnCreateParams(&params)); for (int64_t i = 0; i < batch_size; i++) { at::cuda::solver::xpotrs( handle, params, uplo, n, nrhs, datatype, A_ptr + i * A_matrix_stride, lda, datatype, self_working_copy_ptr + i * self_matrix_stride, ldb, infos_ptr ); } TORCH_CUSOLVER_CHECK(cusolverDnDestroyParams(params)); #else // USE_CUSOLVER_64_BIT int n_32 = cuda_int_cast(n, "n"); int nrhs_32 = cuda_int_cast(nrhs, "nrhs"); int lda_32 = cuda_int_cast(lda, "lda"); int ldb_32 = cuda_int_cast(ldb, "ldb"); for (int64_t i = 0; i < batch_size; i++) { at::cuda::solver::potrs<scalar_t>( handle, uplo, n_32, nrhs_32, A_ptr + i * A_matrix_stride, lda_32, self_working_copy_ptr + i * self_matrix_stride, ldb_32, infos_ptr ); } #endif // USE_CUSOLVER_64_BIT } // This code path is only dispatched to if MAGMA is not linked in the pytorch build. // cusolverDn<t>potrsBatched only supports nrhs == 1 template<typename scalar_t> inline static void apply_cholesky_cusolver_potrsBatched(Tensor& self_working_copy, const Tensor& A_column_major_copy, bool upper, Tensor& infos) { auto handle = at::cuda::getCurrentCUDASolverDnHandle(); const auto uplo = upper ? 
CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; const int64_t n = self_working_copy.size(-2); const int64_t nrhs = self_working_copy.size(-1); const int64_t lda = std::max<int64_t>(1, n); const int64_t batch_size = batchCount(self_working_copy); const int64_t self_matrix_stride = matrixStride(self_working_copy); scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>(); const scalar_t* A_ptr = A_column_major_copy.data_ptr<scalar_t>(); const int64_t A_matrix_stride = matrixStride(A_column_major_copy); const int64_t ldb = std::max<int64_t>(1, A_column_major_copy.size(-1)); int* infos_ptr = infos.data_ptr<int>(); auto self_ptr_array = get_device_pointers<scalar_t>(self_working_copy); auto A_ptr_array = get_device_pointers<scalar_t>(A_column_major_copy); at::cuda::solver::potrsBatched( handle, uplo, cuda_int_cast(n, "n"), cuda_int_cast(nrhs, "nrhs"), reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr()), cuda_int_cast(lda, "lda"), reinterpret_cast<scalar_t**>(self_ptr_array.data_ptr()), cuda_int_cast(ldb, "ldb"), infos_ptr, cuda_int_cast(batch_size, "batch_size") ); } Tensor _cholesky_solve_helper_cuda_cusolver(const Tensor& self, const Tensor& A, bool upper) { const int64_t batch_size = batchCount(self); at::Tensor infos = at::zeros({1}, self.options().dtype(at::kInt)); at::Tensor self_working_copy = cloneBatchedColumnMajor(self); at::Tensor A_column_major_copy = cloneBatchedColumnMajor(A); const int64_t nrhs = self_working_copy.size(-1); // cusolverDn<t>potrsBatched only supports nrhs == 1 if (batch_size > 1 && nrhs == 1) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda_potrs_batched", [&] { apply_cholesky_cusolver_potrsBatched<scalar_t>(self_working_copy, A_column_major_copy, upper, infos); }); } else { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda_potrs", [&] { apply_cholesky_cusolver_potrs<scalar_t>(self_working_copy, A_column_major_copy, upper, infos); }); } // info from potrs and potrsBatched only report if the i-th parameter is wrong, not about the matrix singularity, etc. // So we don't need to check it all the time. TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.item().toInt() == 0); return self_working_copy; } void _cholesky_inverse_cusolver_potrs_based(Tensor& result, Tensor& infos, bool upper) { at::Tensor input_working_copy = cloneBatchedColumnMajor(result); at::Tensor infos_gpu = at::zeros({1}, result.options().dtype(at::kInt)); result.fill_(0); result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_cuda_potri", [&] { apply_cholesky_cusolver_potrs<scalar_t>(result, input_working_copy, upper, infos_gpu); }); // Debug only: info of cusolver potrs only check if the i-th parameter is wrong // Function argument `infos` is a CPU tensor, the following copy will cause a device-host sync. // infos.copy_(infos_gpu); } Tensor& cholesky_inverse_kernel_impl_cusolver(Tensor &result, Tensor& infos, bool upper) { _cholesky_inverse_cusolver_potrs_based(result, infos, upper); return result; } /* The geqrf function computes the QR decomposition of a m x n matrix A. 
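The factorization is A = Q * R: R is returned in the upper triangle of A, while Q is represented implicitly by the Householder reflectors stored below the diagonal together with their scalar factors in `tau` (Q can later be applied with ormqr/unmqr or materialized explicitly with orgqr/ungqr, see below).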
Args: * `A` - [in] Tensor with matrices for QR decomposition, [out] Tensor containing R in the upper triangle of A and elementary reflectors below the main diagonal of A * `tau` - Tensor containing the magnitudes of the elementary reflectors * `m` - The number of rows of `input` to consider * `n` - The number of columns of `input` to consider (actual sizes of `input` could be larger) For further details, please see the cuSOLVER documentation for GEQRF. */ template <typename scalar_t> static void apply_geqrf(const Tensor& A, const Tensor& tau) { int64_t m = A.size(-2); int64_t n = A.size(-1); int64_t lda = std::max<int64_t>(1, m); int64_t batch_size = batchCount(A); auto A_stride = matrixStride(A); auto tau_stride = tau.size(-1); auto A_data = A.data_ptr<scalar_t>(); auto tau_data = tau.data_ptr<scalar_t>(); auto infos = at::zeros({1}, A.options().dtype(at::kInt)); auto infos_data = infos.data_ptr<int>(); // get the optimal work size and allocate workspace tensor #ifdef USE_CUSOLVER_64_BIT size_t worksize_device; // workspaceInBytesOnDevice size_t worksize_host; // workspaceInBytesOnHost cusolverDnParams_t params = NULL; // use default algorithm (currently it's the only option) at::cuda::solver::xgeqrf_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), params, m, n, A_data, lda, tau_data, &worksize_device, &worksize_host); #else int lwork; int m_32 = cuda_int_cast(m, "m"); int n_32 = cuda_int_cast(n, "n"); int lda_32 = cuda_int_cast(lda, "lda"); at::cuda::solver::geqrf_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), m_32, n_32, A_data, lda_32, &lwork); #endif // USE_CUSOLVER_64_BIT for (decltype(batch_size) i = 0; i < batch_size; i++) { scalar_t* A_working_ptr = &A_data[i * A_stride]; scalar_t* tau_working_ptr = &tau_data[i * tau_stride]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); #ifdef USE_CUSOLVER_64_BIT // allocate workspace storage on device and host auto& device_allocator = *at::cuda::getCUDADeviceAllocator(); auto work_device_data = device_allocator.allocate(worksize_device); auto& host_allocator = *at::getCPUAllocator(); auto work_host_data = host_allocator.allocate(worksize_host); at::cuda::solver::xgeqrf<scalar_t>( handle, params, m, n, A_working_ptr, lda, tau_working_ptr, static_cast<scalar_t*>(work_device_data.get()), worksize_device, static_cast<scalar_t*>(work_host_data.get()), worksize_host, infos_data); #else // allocate workspace storage on device auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t) * std::max<int>(1, lwork)); at::cuda::solver::geqrf<scalar_t>( handle, m_32, n_32, A_working_ptr, lda_32, tau_working_ptr, static_cast<scalar_t*>(work_data.get()), lwork, infos_data); #endif // USE_CUSOLVER_64_BIT } // info from geqrf only reports if the i-th parameter is wrong, not about the matrix singularity // so we don't need to check it all the time TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.item().toInt() == 0); } // This is a type dispatching helper function for 'apply_geqrf' void geqrf_cusolver(const Tensor& input, const Tensor& tau) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_cuda", [&]{ apply_geqrf<scalar_t>(input, tau); }); } /* The ormqr function multiplies Q with another matrix from a sequence of elementary reflectors, such as is produced by the geqrf function. Args: * `input` - Tensor with elementary reflectors below the diagonal, encoding the matrix Q. * `tau` - Tensor containing the magnitudes of the elementary reflectors. 
* `other` - [in] Tensor containing the matrix to be multiplied. [out] result of the matrix multiplication with Q. * `left` - bool, determining whether `other` is left- or right-multiplied with Q. * `transpose` - bool, determining whether to transpose (or conjugate transpose) Q before multiplying. For further details, please see the cuSOLVER documentation for ORMQR and UNMQR. */ template <typename scalar_t> static void apply_ormqr(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) { using value_t = typename c10::scalar_value_type<scalar_t>::type; auto side = left ? CUBLAS_SIDE_LEFT : CUBLAS_SIDE_RIGHT; auto trans = transpose ? (input.is_complex() ? CUBLAS_OP_C : CUBLAS_OP_T) : CUBLAS_OP_N; auto input_data = input.data_ptr<scalar_t>(); auto tau_data = tau.data_ptr<scalar_t>(); auto other_data = other.data_ptr<scalar_t>(); auto input_matrix_stride = matrixStride(input); auto other_matrix_stride = matrixStride(other); auto tau_stride = tau.size(-1); auto batch_size = batchCount(input); auto m = cuda_int_cast(other.size(-2), "m"); auto n = cuda_int_cast(other.size(-1), "n"); auto k = cuda_int_cast(tau.size(-1), "k"); auto lda = std::max<int>(1, left ? m : n); auto ldc = std::max<int>(1, m); // get the optimal work size and allocate workspace tensor int lwork; at::cuda::solver::ormqr_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), side, trans, m, n, k, input_data, lda, tau_data, other_data, ldc, &lwork); auto info = at::zeros({1}, input.options().dtype(at::kInt)); auto info_data = info.data_ptr<int>(); for (auto i = decltype(batch_size){0}; i < batch_size; i++) { scalar_t* input_working_ptr = &input_data[i * input_matrix_stride]; scalar_t* other_working_ptr = &other_data[i * other_matrix_stride]; scalar_t* tau_working_ptr = &tau_data[i * tau_stride]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); // allocate workspace storage auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t)*lwork); at::cuda::solver::ormqr<scalar_t>( handle, side, trans, m, n, k, input_working_ptr, lda, tau_working_ptr, other_working_ptr, ldc, static_cast<scalar_t*>(work_data.get()), lwork, info_data ); // info from ormqr only reports if the i-th parameter is wrong // so we don't need to check it all the time TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0); } } // This is a type dispatching helper function for 'apply_ormqr' void ormqr_cusolver(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "orgmr_cuda", [&]{ apply_ormqr<scalar_t>(input, tau, other, left, transpose); }); } /* The orgqr function allows reconstruction of an orthogonal (or unitary) matrix Q, from a sequence of elementary reflectors, such as produced by the geqrf function. Args: * `self` - Tensor with the directions of the elementary reflectors below the diagonal, it will be overwritten with the result * `tau` - Tensor containing the magnitudes of the elementary reflectors For further details, please see the cuSOLVER documentation for ORGQR and UNGQR. 
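Concretely, given the reflectors and `tau` produced by geqrf, this routine overwrites `self` with the explicit m x n matrix Q, i.e. the first n columns of the product of the k elementary reflectors.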
*/ template <typename scalar_t> inline static void apply_orgqr(Tensor& self, const Tensor& tau) { using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto tau_data = tau.data_ptr<scalar_t>(); auto self_matrix_stride = matrixStride(self); auto batchsize = cuda_int_cast(batchCount(self), "batch size"); auto m = cuda_int_cast(self.size(-2), "m"); auto n = cuda_int_cast(self.size(-1), "n"); auto k = cuda_int_cast(tau.size(-1), "k"); auto tau_stride = std::max<int>(1, k); auto lda = std::max<int>(1, m); // LAPACK's requirement TORCH_INTERNAL_ASSERT(m >= n); TORCH_INTERNAL_ASSERT(n >= k); // cuSOLVER doesn't compute anything for this case, which is wrong // the result should be a matrix with 1 on the diagonal if (k == 0) { self.fill_(0); self.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1); return; } // get the optimal work size and allocate workspace tensor int lwork; at::cuda::solver::orgqr_buffersize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), m, n, k, self_data, lda, tau_data, &lwork); auto info = at::zeros({1}, self.options().dtype(at::kInt)); auto info_data = info.data_ptr<int>(); for (auto i = decltype(batchsize){0}; i < batchsize; i++) { scalar_t* self_working_ptr = &self_data[i * self_matrix_stride]; scalar_t* tau_working_ptr = &tau_data[i * tau_stride]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); // allocate workspace storage auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t)*lwork); at::cuda::solver::orgqr<scalar_t>( handle, m, n, k, self_working_ptr, lda, tau_working_ptr, static_cast<scalar_t*>(work_data.get()), lwork, info_data ); // info from orgqr only reports if the i-th parameter is wrong // so we don't need to check it all the time TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0); } } // This is a type dispatching helper function for 'apply_orgqr' Tensor& orgqr_helper_cusolver(Tensor& result, const Tensor& tau) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "orgqr_cuda", [&]{ apply_orgqr<scalar_t>(result, tau); }); return result; } template <typename scalar_t> static void apply_syevd(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) { using value_t = typename c10::scalar_value_type<scalar_t>::type; cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; cusolverEigMode_t jobz = compute_eigenvectors ? 
CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR; int64_t n = vectors.size(-1); int64_t lda = std::max<int64_t>(1, n); int64_t batch_size = batchCount(vectors); auto vectors_stride = matrixStride(vectors); auto values_stride = values.size(-1); auto vectors_data = vectors.data_ptr<scalar_t>(); auto values_data = values.data_ptr<value_t>(); auto infos_data = infos.data_ptr<int>(); // get the optimal work size and allocate workspace tensor #ifdef USE_CUSOLVER_64_BIT size_t worksize_device; // workspaceInBytesOnDevice size_t worksize_host; // workspaceInBytesOnHost cusolverDnParams_t params = NULL; // use default algorithm (currently it's the only option) at::cuda::solver::xsyevd_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), params, jobz, uplo, n, vectors_data, lda, values_data, &worksize_device, &worksize_host); #else int lwork; int n_32 = cuda_int_cast(n, "n"); int lda_32 = cuda_int_cast(lda, "lda"); at::cuda::solver::syevd_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), jobz, uplo, n_32, vectors_data, lda_32, values_data, &lwork); #endif // USE_CUSOLVER_64_BIT for (decltype(batch_size) i = 0; i < batch_size; i++) { scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride]; value_t* values_working_ptr = &values_data[i * values_stride]; int* info_working_ptr = &infos_data[i]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); #ifdef USE_CUSOLVER_64_BIT // allocate workspace storage on device and host auto& device_allocator = *at::cuda::getCUDADeviceAllocator(); auto work_device_data = device_allocator.allocate(worksize_device); auto& host_allocator = *at::getCPUAllocator(); auto work_host_data = host_allocator.allocate(worksize_host); at::cuda::solver::xsyevd<scalar_t>( handle, params, jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr, static_cast<scalar_t*>(work_device_data.get()), worksize_device, static_cast<scalar_t*>(work_host_data.get()), worksize_host, info_working_ptr); #else // allocate workspace storage on device auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t) * lwork); at::cuda::solver::syevd<scalar_t>( handle, jobz, uplo, n_32, vectors_working_ptr, lda_32, values_working_ptr, static_cast<scalar_t*>(work_data.get()), lwork, info_working_ptr); #endif // USE_CUSOLVER_64_BIT } } template <typename scalar_t> static void apply_syevj(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) { using value_t = typename c10::scalar_value_type<scalar_t>::type; cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; cusolverEigMode_t jobz = compute_eigenvectors ? 
CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR; int n = cuda_int_cast(vectors.size(-1), "n"); int lda = std::max<int>(1, n); auto batch_size = batchCount(vectors); auto vectors_stride = matrixStride(vectors); auto values_stride = values.size(-1); auto vectors_data = vectors.data_ptr<scalar_t>(); auto values_data = values.data_ptr<value_t>(); auto infos_data = infos.data_ptr<int>(); // syevj_params controls the numerical accuracy of syevj // by default the tolerance is set to machine accuracy // the maximum number of iteration of Jacobi method by default is 100 // cuSOLVER documentations says: "15 sweeps are good enough to converge to machine accuracy" // LAPACK has SVD routine based on similar Jacobi algorithm (gesvj) and there a maximum of 30 iterations is set // Let's use the default values for now syevjInfo_t syevj_params; TORCH_CUSOLVER_CHECK(cusolverDnCreateSyevjInfo(&syevj_params)); // get the optimal work size and allocate workspace tensor int lwork; at::cuda::solver::syevj_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), jobz, uplo, n, vectors_data, lda, values_data, &lwork, syevj_params); for (decltype(batch_size) i = 0; i < batch_size; i++) { scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride]; value_t* values_working_ptr = &values_data[i * values_stride]; int* info_working_ptr = &infos_data[i]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); // allocate workspace storage on device auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t) * lwork); at::cuda::solver::syevj<scalar_t>( handle, jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr, static_cast<scalar_t*>(work_data.get()), lwork, info_working_ptr, syevj_params); } TORCH_CUSOLVER_CHECK(cusolverDnDestroySyevjInfo(syevj_params)); } template <typename scalar_t> static void apply_syevj_batched(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) { using value_t = typename c10::scalar_value_type<scalar_t>::type; cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; cusolverEigMode_t jobz = compute_eigenvectors ? 
CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR; int n = cuda_int_cast(vectors.size(-1), "n"); int lda = std::max<int>(1, n); int batch_size = cuda_int_cast(batchCount(vectors), "batch_size"); auto vectors_data = vectors.data_ptr<scalar_t>(); auto values_data = values.data_ptr<value_t>(); auto infos_data = infos.data_ptr<int>(); // syevj_params controls the numerical accuracy of syevj // by default the tolerance is set to machine accuracy // the maximum number of iteration of Jacobi method by default is 100 // cuSOLVER documentations says: "15 sweeps are good enough to converge to machine accuracy" // LAPACK has SVD routine based on similar Jacobi algorithm (gesvj) and there a maximum of 30 iterations is set // Let's use the default values for now syevjInfo_t syevj_params; TORCH_CUSOLVER_CHECK(cusolverDnCreateSyevjInfo(&syevj_params)); TORCH_CUSOLVER_CHECK(cusolverDnXsyevjSetSortEig(syevj_params, 1)); auto handle = at::cuda::getCurrentCUDASolverDnHandle(); // get the optimal work size and allocate workspace tensor int lwork; at::cuda::solver::syevjBatched_bufferSize<scalar_t>( handle, jobz, uplo, n, vectors_data, lda, values_data, &lwork, syevj_params, batch_size); // allocate workspace storage on device auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t) * lwork); at::cuda::solver::syevjBatched<scalar_t>( handle, jobz, uplo, n, vectors_data, lda, values_data, static_cast<scalar_t*>(work_data.get()), lwork, infos_data, syevj_params, batch_size); TORCH_CUSOLVER_CHECK(cusolverDnDestroySyevjInfo(syevj_params)); } static void linalg_eigh_cusolver_syevd(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(eigenvectors.scalar_type(), "linalg_eigh_cuda", [&] { apply_syevd<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); }); } static void linalg_eigh_cusolver_syevj(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(eigenvectors.scalar_type(), "linalg_eigh_cuda", [&] { apply_syevj<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); }); } void linalg_eigh_cusolver(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) { // TODO: syevj_batched should be added here, but at least for CUDA 11.2 it contains a bug leading to incorrect results // See https://github.com/pytorch/pytorch/pull/53040#issuecomment-793626268 and https://github.com/cupy/cupy/issues/4847 // syevj is better than syevd for float32 dtype and matrix sizes 32x32 - 512x512 // See https://github.com/pytorch/pytorch/pull/53040#issuecomment-788264724 if (eigenvectors.scalar_type() == at::kFloat && eigenvectors.size(-1) >= 32 && eigenvectors.size(-1) <= 512) { return linalg_eigh_cusolver_syevj(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); } else { return linalg_eigh_cusolver_syevd(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); } } #endif // USE_CUSOLVER }} // namespace at::native
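/* Hedged illustration, not part of the ATen file above: the batched-cuBLAS branch of
   apply_batched_inverse_lib builds a "device array of device pointers" because
   cublas<T>getrfBatched / cublas<T>getriBatched expect one device-resident pointer per
   matrix in the batch. The standalone sketch below shows that convention with plain
   CUDA/cuBLAS calls for float matrices; the function name, the contiguous layout of the
   batch and the host-side filling of the pointer tables are illustrative assumptions. */
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <vector>

static void batched_inverse_sketch(float* d_A, float* d_Ainv, int n, int batch)
{
  // d_A and d_Ainv each hold `batch` column-major n x n matrices stored back to back.
  std::vector<float*> hA(batch), hC(batch);
  for (int i = 0; i < batch; ++i) {
    hA[i] = d_A    + static_cast<size_t>(i) * n * n;  // i-th input matrix
    hC[i] = d_Ainv + static_cast<size_t>(i) * n * n;  // i-th output matrix
  }

  // The pointer tables themselves must live in device memory.
  float **dA = nullptr, **dC = nullptr;
  int *d_pivots = nullptr, *d_infos = nullptr;
  cudaMalloc(&dA, batch * sizeof(float*));
  cudaMalloc(&dC, batch * sizeof(float*));
  cudaMalloc(&d_pivots, batch * n * sizeof(int));
  cudaMalloc(&d_infos, batch * sizeof(int));
  cudaMemcpy(dA, hA.data(), batch * sizeof(float*), cudaMemcpyHostToDevice);
  cudaMemcpy(dC, hC.data(), batch * sizeof(float*), cudaMemcpyHostToDevice);

  cublasHandle_t handle;
  cublasCreate(&handle);

  // LU-factorize every matrix in the batch, then invert out-of-place from the LU factors.
  cublasSgetrfBatched(handle, n, dA, n, d_pivots, d_infos, batch);
  cublasSgetriBatched(handle, n, dA, n, d_pivots, dC, n, d_infos, batch);

  cublasDestroy(handle);
  cudaFree(dA); cudaFree(dC); cudaFree(d_pivots); cudaFree(d_infos);
}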
6d730b64d88e967bfaea165ab0fdf82c0c044783.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel_hip.cuh" #include "stdio.h" /*________________________________________________________* * * * CUDA KERNELS AND ASSOCIATED FUNCTIONS * * * *_________________________________________________________*/ // device Kernel (can only be call by a kernel) :: define the modulo operation inline __device__ int modulo(int val, int c){ return (val & (c - 1)); } // Kernel :: fill an image with a chekcerboard pattern __global__ void _checkerboard(uchar4 *image, int step, uchar4 color1, uchar4 color2, unsigned int width, unsigned int height, unsigned int imStep) { // get the position of the current pixel int x_local = blockIdx.x * blockDim.x + threadIdx.x; int y_local = blockIdx.y * blockDim.y + threadIdx.y; // exit if the pixel is out of the size of the image if (x_local >= width || y_local >= height) return; // fill the image, alternate the colors if (modulo(x_local, step) < (step/2)) image[y_local * imStep + x_local] = modulo(y_local, step) < (step / 2) ? color1 : color2; else image[y_local * imStep + x_local] = modulo(y_local, step) < (step / 2) ? color2 : color1; } // Function :: fill an image with a chekcerboard pattern void cuCreateCheckerboard(sl::zed::Mat &image) { // get the image size unsigned int width = image.width; unsigned int height = image.height; // define the block dimension for the parallele computation dim3 dimGrid, dimBlock; dimBlock.x = 32; dimBlock.y = 8; dimGrid.x = ceill(width / (float)dimBlock.x); dimGrid.y = ceill(height / (float)dimBlock.y); // define the size of the square int step = 20; // define the two colors of the checkerboard uchar4 color1 = make_uchar4(250, 250, 250, 255); uchar4 color2 = make_uchar4(236, 172, 0, 255); // call the kernel _checkerboard << <dimGrid, dimBlock >> >((uchar4 *)image.data, step, color1, color2, width, height, image.step / sizeof(uchar4)); } // Kernel :: replace the current image by an other if the depth if above the threshold __global__ void _croppImage(float* deptharray, float* depth, uchar4 * imageIn, uchar4 * imageOut, uchar4 * mask, float threshold, unsigned int width, unsigned int height, unsigned int depthStep, unsigned int imInStep, unsigned int imOutStep, unsigned int maskStep) { // get the position of the current pixel int x_local = blockIdx.x * blockDim.x + threadIdx.x; int y_local = blockIdx.y * blockDim.y + threadIdx.y; // exit if the pixel is out of the size of the image if (x_local >= width || y_local >= height) return; // get the depth of the current pixel float D = depth[y_local * depthStep + x_local]; // the depth is strickly positive, if not it means that the depth can not be computed // the depth should be below the threshold if ((isfinite(D)) && (D < threshold + 1))// keep the current image if true imageOut[y_local * imOutStep + x_local] = imageIn[y_local * imInStep + x_local]; else // if false : replace current pixel by the pixel of the mask imageOut[y_local * imOutStep + x_local] = mask[y_local * maskStep + x_local]; if(D < threshold) deptharray[y_local * imOutStep + x_local] = 0; else if( threshold + 2 > D && D > threshold ) deptharray[y_local * imOutStep + x_local] = 1; else deptharray[y_local * imOutStep + x_local] = 2; //printf("imInStep data: %d ; imOutStep data: %d; \n", imInStep, imOutStep); } // Function :: replace the current image by an other if the depth if above the threshold void cuCroppImageByDepth(float *array, sl::zed::Mat &depth, sl::zed::Mat &imageLeft, sl::zed::Mat &imageCut, sl::zed::Mat &mask, 
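/* depth cut-off, in the same units as the ZED depth map: pixels whose depth is finite
   and below threshold + 1 keep the camera image, all others are replaced by the
   corresponding mask pixel */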
float threshold) { // get the image size unsigned int width = depth.width; unsigned int height = depth.height; //printf("width: %d ; height: %d ;", width, height); //define the array which carry the information // float *deptharray = (float*) malloc(width*height); /* for(int i=0; i<width; i++){ for(int j=0; j<height; j++) deptharray[j*height + i] = 0; } */ // define the block dimension for the parallele computation dim3 dimGrid, dimBlock; dimBlock.x = 32; dimBlock.y = 8; dimGrid.x = ceill(width / (float)dimBlock.x); dimGrid.y = ceill(height / (float)dimBlock.y); // call the kernel _croppImage << <dimGrid, dimBlock >> >((float *) array, (float *)depth.data, (uchar4 *)imageLeft.data, (uchar4 *)imageCut.data, (uchar4 *)mask.data, threshold, width, height, depth.step / sizeof(float), imageLeft.step / sizeof(uchar4), imageCut.step / sizeof(uchar4), mask.step / sizeof(uchar4)); } //__global__ void _maximalSquare(float* matrix, int *size){ __global__ void _maximalSquare(float* matrix, int *size, float* dist){ int big = size[0]*size[0]; int small =size[1]*size[1]; int close = 0; int far = 0; int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if(idx > 1280 || idy > 720 ) return; for(int i=0; i<size[0]; i++){ for(int j = 0; j <size[0]; j++){ if(matrix[(j+idy)*1280+ i + idx] >= 1) close++; // if(matrix[(j+idy+size[0]/2-size[1]/2)*1280+i+idx+size[0]/2-size[1]/2] == 2) far++; } } for(int i=0; i<size[1]; i++){ for(int j = 0; j < size[1]; j++){ if(matrix[(j+idy+size[0]/2-size[1]/2)*1280+i+idx+size[0]/2-size[1]/2] == 2) far++; } } int tempX = idx; int tempY = idy; if(close == big && far == small){ //if(close == big){ tempX = idx + size[0]/2; tempY = idy + size[0]/2; dist[idy*1280+idx] = sqrt(((float)tempX-640)*((float)tempX-640) + ((float)tempY-360)*((float)tempY-360)); }else{ dist[idy*1280+idx] = 65535; } } void cuFindMaximalSquare(float *d_arraySquare, int* d_size, float* dist){ dim3 threadsperBlock(32,8); // call the kernel dim3 numBlocks(1280/threadsperBlock.x, 720/threadsperBlock.y); _maximalSquare << <numBlocks, threadsperBlock >> >((float*) d_arraySquare, (int*) d_size, (float*) dist); } __global__ void _minDist(float* minVal, float* dist){ extern __shared__ float shared[]; int tid = threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; if(i > 720*1280 ) return; shared[tid] = dist[i]; __syncthreads(); for(unsigned int s = blockDim.x/2; s>0; s>>=1){ if(tid < s ){ if(shared[tid] > shared[tid+s]) shared[tid] = shared[tid+s]; } __syncthreads(); } if(tid == 0) { minVal[blockIdx.x] = shared[0]; } } /* __device__ void warpReduce(volatile float* sdata, unsigned int tid){ if(blockDim.x >= 64) sdata[tid] += sdata[tid+32]; if(blockDim.x >= 32) sdata[tid] += sdata[tid+16]; if(blockDim.x >= 16) sdata[tid] += sdata[tid+8]; if(blockDim.x >= 8) sdata[tid] += sdata[tid+4]; if(blockDim.x >= 2) sdata[tid] += sdata[tid+1]; } __global__ void _minDist(float* odata, float* idata){ extern __shared__ float sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockDim.x*2) + tid; unsigned int gridSize = blockDim.x*2*gridDim.x; sdata[tid] = 0; while(i<1280*720) {sdata[tid] = idata[i] + idata[i+blockDim.x]; i+=gridSize;} __syncthreads(); if(blockDim.x >= 512) {if(tid<256) {sdata[tid] += sdata[tid+256];} __syncthreads();} if(blockDim.x >= 256) {if(tid<128) {sdata[tid] += sdata[tid+128];} __syncthreads();} if(blockDim.x >= 128) {if(tid<64) {sdata[tid] += sdata[tid+64];} __syncthreads();} if(tid < 32) warpReduce(sdata, tid); if(tid == 0) odata[blockIdx.x] = 
sdata[0]; } */ void cuFindMinDist(float* size, float* dist){ int threads = 720; int block = 1280; hipLaunchKernelGGL(( _minDist), dim3(block), dim3(threads), sizeof(float)*threads, 0, (float*)size, (float*) dist); } __global__ void _findPos(float* dist, int* xPos, int* yPos, float* out){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if(idx > 1280 || idy > 720) return; double temp1 = 0; double temp2 = 0; temp1 = (double)dist[1280*idy + idx]; temp2 = (double)out[0]; //printf("out value : %f\n", temp2); //if(dist[idy*1280+idx]<65535) // printf("dist value: %f; real value: %f\n", temp1, temp2); if(temp1 == temp2){ xPos[0] = idx; // printf(" x position: %d\n", idx); yPos[0] = idy; // printf(" y position: %d\n", idy); } } void cuFindPos(float* d_dist, int* d_xPos, int* d_yPos,float* out){ dim3 threadsperBlock(16,12); dim3 numBlocks(1280/threadsperBlock.x, 720/threadsperBlock.y); _findPos << <numBlocks, threadsperBlock >> >((float*) d_dist, (int*) d_xPos, (int*) d_yPos, (float*) out); }
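/* Side note, illustrative only and not part of the paired files: the shared-memory
   reduction in _minDist halves the active range with s >>= 1, which only covers every
   element when blockDim.x is a power of two. The kernel is launched with 720 threads,
   so, for example, the step from s = 45 to s = 22 never folds in shared[44]. A minimal
   odd-safe sketch of the same block-level min reduction follows; the kernel name and the
   fixed 1280*720 problem size are assumptions carried over from the file above. */
__global__ void _minDistOddSafe(float* minVal, const float* dist)
{
    extern __shared__ float shared[];
    const unsigned tid = threadIdx.x;
    const unsigned i = blockIdx.x * blockDim.x + threadIdx.x;

    // Out-of-range threads contribute a sentinel that can never win the minimum.
    shared[tid] = (i < 1280u * 720u) ? dist[i] : 65535.0f;
    __syncthreads();

    // Ceiling-halving keeps every active element paired (or carried) even for odd counts.
    for (unsigned s = blockDim.x; s > 1; )
    {
        const unsigned half = (s + 1u) / 2u;
        if (tid + half < s)
            shared[tid] = fminf(shared[tid], shared[tid + half]);
        __syncthreads();
        s = half;
    }

    if (tid == 0)
        minVal[blockIdx.x] = shared[0];
}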
6d730b64d88e967bfaea165ab0fdf82c0c044783.cu
#include "kernel.cuh" #include "stdio.h" /*________________________________________________________* * * * CUDA KERNELS AND ASSOCIATED FUNCTIONS * * * *_________________________________________________________*/ // device Kernel (can only be call by a kernel) :: define the modulo operation inline __device__ int modulo(int val, int c){ return (val & (c - 1)); } // Kernel :: fill an image with a chekcerboard pattern __global__ void _checkerboard(uchar4 *image, int step, uchar4 color1, uchar4 color2, unsigned int width, unsigned int height, unsigned int imStep) { // get the position of the current pixel int x_local = blockIdx.x * blockDim.x + threadIdx.x; int y_local = blockIdx.y * blockDim.y + threadIdx.y; // exit if the pixel is out of the size of the image if (x_local >= width || y_local >= height) return; // fill the image, alternate the colors if (modulo(x_local, step) < (step/2)) image[y_local * imStep + x_local] = modulo(y_local, step) < (step / 2) ? color1 : color2; else image[y_local * imStep + x_local] = modulo(y_local, step) < (step / 2) ? color2 : color1; } // Function :: fill an image with a chekcerboard pattern void cuCreateCheckerboard(sl::zed::Mat &image) { // get the image size unsigned int width = image.width; unsigned int height = image.height; // define the block dimension for the parallele computation dim3 dimGrid, dimBlock; dimBlock.x = 32; dimBlock.y = 8; dimGrid.x = ceill(width / (float)dimBlock.x); dimGrid.y = ceill(height / (float)dimBlock.y); // define the size of the square int step = 20; // define the two colors of the checkerboard uchar4 color1 = make_uchar4(250, 250, 250, 255); uchar4 color2 = make_uchar4(236, 172, 0, 255); // call the kernel _checkerboard << <dimGrid, dimBlock >> >((uchar4 *)image.data, step, color1, color2, width, height, image.step / sizeof(uchar4)); } // Kernel :: replace the current image by an other if the depth if above the threshold __global__ void _croppImage(float* deptharray, float* depth, uchar4 * imageIn, uchar4 * imageOut, uchar4 * mask, float threshold, unsigned int width, unsigned int height, unsigned int depthStep, unsigned int imInStep, unsigned int imOutStep, unsigned int maskStep) { // get the position of the current pixel int x_local = blockIdx.x * blockDim.x + threadIdx.x; int y_local = blockIdx.y * blockDim.y + threadIdx.y; // exit if the pixel is out of the size of the image if (x_local >= width || y_local >= height) return; // get the depth of the current pixel float D = depth[y_local * depthStep + x_local]; // the depth is strickly positive, if not it means that the depth can not be computed // the depth should be below the threshold if ((isfinite(D)) && (D < threshold + 1))// keep the current image if true imageOut[y_local * imOutStep + x_local] = imageIn[y_local * imInStep + x_local]; else // if false : replace current pixel by the pixel of the mask imageOut[y_local * imOutStep + x_local] = mask[y_local * maskStep + x_local]; if(D < threshold) deptharray[y_local * imOutStep + x_local] = 0; else if( threshold + 2 > D && D > threshold ) deptharray[y_local * imOutStep + x_local] = 1; else deptharray[y_local * imOutStep + x_local] = 2; //printf("imInStep data: %d ; imOutStep data: %d; \n", imInStep, imOutStep); } // Function :: replace the current image by an other if the depth if above the threshold void cuCroppImageByDepth(float *array, sl::zed::Mat &depth, sl::zed::Mat &imageLeft, sl::zed::Mat &imageCut, sl::zed::Mat &mask, float threshold) { // get the image size unsigned int width = depth.width; unsigned int height = 
depth.height; //printf("width: %d ; height: %d ;", width, height); //define the array which carry the information // float *deptharray = (float*) malloc(width*height); /* for(int i=0; i<width; i++){ for(int j=0; j<height; j++) deptharray[j*height + i] = 0; } */ // define the block dimension for the parallele computation dim3 dimGrid, dimBlock; dimBlock.x = 32; dimBlock.y = 8; dimGrid.x = ceill(width / (float)dimBlock.x); dimGrid.y = ceill(height / (float)dimBlock.y); // call the kernel _croppImage << <dimGrid, dimBlock >> >((float *) array, (float *)depth.data, (uchar4 *)imageLeft.data, (uchar4 *)imageCut.data, (uchar4 *)mask.data, threshold, width, height, depth.step / sizeof(float), imageLeft.step / sizeof(uchar4), imageCut.step / sizeof(uchar4), mask.step / sizeof(uchar4)); } //__global__ void _maximalSquare(float* matrix, int *size){ __global__ void _maximalSquare(float* matrix, int *size, float* dist){ int big = size[0]*size[0]; int small =size[1]*size[1]; int close = 0; int far = 0; int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if(idx > 1280 || idy > 720 ) return; for(int i=0; i<size[0]; i++){ for(int j = 0; j <size[0]; j++){ if(matrix[(j+idy)*1280+ i + idx] >= 1) close++; // if(matrix[(j+idy+size[0]/2-size[1]/2)*1280+i+idx+size[0]/2-size[1]/2] == 2) far++; } } for(int i=0; i<size[1]; i++){ for(int j = 0; j < size[1]; j++){ if(matrix[(j+idy+size[0]/2-size[1]/2)*1280+i+idx+size[0]/2-size[1]/2] == 2) far++; } } int tempX = idx; int tempY = idy; if(close == big && far == small){ //if(close == big){ tempX = idx + size[0]/2; tempY = idy + size[0]/2; dist[idy*1280+idx] = sqrt(((float)tempX-640)*((float)tempX-640) + ((float)tempY-360)*((float)tempY-360)); }else{ dist[idy*1280+idx] = 65535; } } void cuFindMaximalSquare(float *d_arraySquare, int* d_size, float* dist){ dim3 threadsperBlock(32,8); // call the kernel dim3 numBlocks(1280/threadsperBlock.x, 720/threadsperBlock.y); _maximalSquare << <numBlocks, threadsperBlock >> >((float*) d_arraySquare, (int*) d_size, (float*) dist); } __global__ void _minDist(float* minVal, float* dist){ extern __shared__ float shared[]; int tid = threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; if(i > 720*1280 ) return; shared[tid] = dist[i]; __syncthreads(); for(unsigned int s = blockDim.x/2; s>0; s>>=1){ if(tid < s ){ if(shared[tid] > shared[tid+s]) shared[tid] = shared[tid+s]; } __syncthreads(); } if(tid == 0) { minVal[blockIdx.x] = shared[0]; } } /* __device__ void warpReduce(volatile float* sdata, unsigned int tid){ if(blockDim.x >= 64) sdata[tid] += sdata[tid+32]; if(blockDim.x >= 32) sdata[tid] += sdata[tid+16]; if(blockDim.x >= 16) sdata[tid] += sdata[tid+8]; if(blockDim.x >= 8) sdata[tid] += sdata[tid+4]; if(blockDim.x >= 2) sdata[tid] += sdata[tid+1]; } __global__ void _minDist(float* odata, float* idata){ extern __shared__ float sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockDim.x*2) + tid; unsigned int gridSize = blockDim.x*2*gridDim.x; sdata[tid] = 0; while(i<1280*720) {sdata[tid] = idata[i] + idata[i+blockDim.x]; i+=gridSize;} __syncthreads(); if(blockDim.x >= 512) {if(tid<256) {sdata[tid] += sdata[tid+256];} __syncthreads();} if(blockDim.x >= 256) {if(tid<128) {sdata[tid] += sdata[tid+128];} __syncthreads();} if(blockDim.x >= 128) {if(tid<64) {sdata[tid] += sdata[tid+64];} __syncthreads();} if(tid < 32) warpReduce(sdata, tid); if(tid == 0) odata[blockIdx.x] = sdata[0]; } */ void cuFindMinDist(float* size, float* dist){ int threads = 720; int block = 1280; 
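/* One 720-thread block per 720-element slice of the flattened 1280x720 distance map:
   each block loads its slice into sizeof(float)*threads bytes of shared memory, reduces
   it to a single value, and _minDist writes that block's minimum to size[blockIdx.x],
   leaving one partial minimum per block. */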
_minDist<<<block, threads, sizeof(float)*threads>>>((float*)size, (float*) dist); } __global__ void _findPos(float* dist, int* xPos, int* yPos, float* out){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if(idx > 1280 || idy > 720) return; double temp1 = 0; double temp2 = 0; temp1 = (double)dist[1280*idy + idx]; temp2 = (double)out[0]; //printf("out value : %f\n", temp2); //if(dist[idy*1280+idx]<65535) // printf("dist value: %f; real value: %f\n", temp1, temp2); if(temp1 == temp2){ xPos[0] = idx; // printf(" x position: %d\n", idx); yPos[0] = idy; // printf(" y position: %d\n", idy); } } void cuFindPos(float* d_dist, int* d_xPos, int* d_yPos,float* out){ dim3 threadsperBlock(16,12); dim3 numBlocks(1280/threadsperBlock.x, 720/threadsperBlock.y); _findPos << <numBlocks, threadsperBlock >> >((float*) d_dist, (int*) d_xPos, (int*) d_yPos, (float*) out); }
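/* Hedged usage sketch, not part of either paired file: cuFindMaximalSquare, cuFindMinDist
   and cuFindPos are only meaningful when chained, and both the final "minimum of the
   per-block minima" and the copy of the selected (x, y) position back to the host are left
   to the caller. The wrapper below shows one way that glue could look, assuming the
   declarations from kernel.cuh above and pre-allocated device buffers d_arraySquare,
   d_size, d_dist, d_partial (1280 floats), d_out (1 float), d_x and d_y (1 int each);
   the 1280x720 geometry is carried over from the kernels. */
#include <cuda_runtime.h>
#include <algorithm>
#include <vector>

static bool find_closest_square(float* d_arraySquare, int* d_size, float* d_dist,
                                float* d_partial, float* d_out, int* d_x, int* d_y,
                                int& x, int& y)
{
    // Stage 1: per-pixel candidate distances, then one partial minimum per 720-element block.
    cuFindMaximalSquare(d_arraySquare, d_size, d_dist);
    cuFindMinDist(d_partial, d_dist);

    // Stage 2: finish the reduction on the host over the 1280 per-block minima.
    std::vector<float> partial(1280);
    cudaMemcpy(partial.data(), d_partial, 1280 * sizeof(float), cudaMemcpyDeviceToHost);
    const float best = *std::min_element(partial.begin(), partial.end());
    if (best >= 65535.0f)   // sentinel written by _maximalSquare: no valid square found
        return false;

    // Stage 3: let _findPos locate which pixel produced that minimum distance.
    cudaMemcpy(d_out, &best, sizeof(float), cudaMemcpyHostToDevice);
    cuFindPos(d_dist, d_x, d_y, d_out);
    cudaMemcpy(&x, d_x, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&y, d_y, sizeof(int), cudaMemcpyDeviceToHost);
    return true;
}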
7ced724f7ab6c0a5ed4c4ef8f2caace064a052ac.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************** * * Copyright (C) 2014 by Andrew Jameson & Ewan Barr * Licensed under the Academic Free License version 2.1 * ****************************************************************************/ #include <hip/hip_runtime.h> #include <hip/hip_complex.h> #include <inttypes.h> #include <stdio.h> #include "dada_cuda.h" #include "mopsr_calib_cuda.h" #define WARP_SIZE 32 //#define _GDEBUG 1 /* cuFloatComplex * in: Input data stream. Structured as freq/time/ant order (freq changing fastest) cuFloatComplex * out: Output data steam. Structured as freq/pair (freq changing fastest) mopsr_baseline_t * pairs: All pair combinations unsigned batch_size: Number of frequency channels unsigned nbatch: Number of time samples unsigned npairs: Number of pairs unsigned nsamps: batch_size * nbatch unsigned nread: Number of frequencies to read at a time into shared memory __global__ acc_cp_spectra_kernel(cuFloatComplex * in, cuFloatComplex * out, mopsr_baseline_t * pairs, unsigned batch_size, unsigned nbatch, unsigned npairs, unsigned nsamps, unsigned nread) { extern __shared__ cuFloatComplex shared []; // size of nread*nant int ii,jj,kk,ll; int freq_idx, ant_idx; int nsamps_by_nread = nsamps/nread; //Each block deals with nread samples from each antenna for (ii=blockIdx.x; ii<nsamps_by_nread; ii+=gridDim.x) { freq_idx = threadIdx.x % nread; //0-nread samp_idx = ii*nread; for (ant_idx = threadIdx.x/nread; ant_idx < nant; ant_idx += blockDim.x/nread) { } } int idx = blockIdx.x * blockDim.x + threadIdx.x; int freq_idx = threadIdx.x%batch_size; //0-batch_size }*/ __global__ void accumulate_cp_spectra_kernel(cuFloatComplex * in, cuFloatComplex * out, mopsr_baseline_t * pairs, unsigned batch_size, unsigned nbatch, unsigned npairs, unsigned nsamps, unsigned in_stride, unsigned out_stride) { mopsr_baseline_t pair; unsigned ii,jj,kk; cuFloatComplex val, val_a, val_b; unsigned pair_idx; unsigned pair_pos_a; unsigned pair_pos_b; unsigned bin_idx; unsigned out_idx; unsigned idx_a,idx_b; in += (blockIdx.y * in_stride); out += (blockIdx.y * out_stride); //loop over npairs (should only ever execute one loop) for (ii=0; ii<npairs; ii+=gridDim.x) { // each block operates on a single pair pair_idx = ii + blockIdx.x; pair = pairs[pair_idx]; pair_pos_a = pair.a * nsamps; pair_pos_b = pair.b * nsamps; // each thread operates on a bin in the CP spectrum //loop over each bin in the cross power spectrum for (kk=0; kk<batch_size; kk+=blockDim.x) { val = make_cuFloatComplex(0.0,0.0); bin_idx = threadIdx.x+kk; //loop over each batch in the fft for (jj=0; jj<nbatch; jj++) { idx_a = pair_pos_a + bin_idx + (jj * batch_size); idx_b = pair_pos_b + bin_idx + (jj * batch_size); val_a = in[idx_a]; val_b = in[idx_b]; val_a.x /= batch_size; val_a.y /= batch_size; val_b.x /= batch_size; val_b.y /= batch_size; val = cuCaddf(val, cuCmulf(cuConjf(val_a),val_b)); } out_idx = pair_idx*batch_size+bin_idx; out[out_idx] = cuCaddf(out[out_idx], val); } } } int mopsr_accumulate_cp_spectra(cuFloatComplex * in, cuFloatComplex * out, mopsr_baseline_t * pairs, unsigned batch_size, unsigned nbatch, unsigned npairs, unsigned nsamps, unsigned nchan, unsigned nant, hipStream_t stream) { struct hipDeviceProp_t props; hipGetDeviceProperties(&props,0); int nthreads = props.maxThreadsPerBlock; dim3 blocks = dim3(props.maxGridSize[0], nchan, 1); if (batch_size<nthreads) nthreads = batch_size; if (npairs<blocks.x) blocks.x = npairs; // stride in 
bytes for each channel int in_stride = batch_size * nbatch * nant; int out_stride = batch_size * npairs; //fprintf (stderr, "blocks=(%d,%d,%d) nthreads=%d in_stride=%d out_stride=%d\n", blocks.x, blocks.y, blocks.z, nthreads, in_stride, out_stride); hipLaunchKernelGGL(( accumulate_cp_spectra_kernel), dim3(blocks), dim3(nthreads), 0, stream , in, out, pairs, batch_size, nbatch, npairs, nsamps, in_stride, out_stride); #if _GDEBUG check_error_stream( "mopsr_accumulate_cp_spectra", stream); #endif return 0; } __global__ void multiply_baselines_kernel (cuFloatComplex * out, const cuFloatComplex * in, const mopsr_baseline_t * pairs, const unsigned batch_size, const unsigned npairs) { // element-wise multiplication of baseline pairs // primary component is multiplied by complex conjugate // of secondary component. mopsr_baseline_t pair; int ii,jj,out_idx,idx_a,idx_b,pair_idx; for (ii=0; ii<npairs; ii+=gridDim.x) { pair_idx = ii + blockIdx.x; if (pair_idx<npairs) { pair = pairs[pair_idx]; for (jj=0; jj<batch_size; jj+=blockDim.x) { idx_a = pair.a * batch_size + threadIdx.x + jj; idx_b = pair.b * batch_size + threadIdx.x + jj; out_idx = pair_idx * batch_size + threadIdx.x + jj; out[out_idx] = cuCmulf(in[idx_a],cuConjf(in[idx_b])); } } } } int mopsr_multiply_baselines(cuFloatComplex * out, cuFloatComplex * in, mopsr_baseline_t * pairs, unsigned batch_size, unsigned npairs, hipStream_t stream) { struct hipDeviceProp_t props; hipGetDeviceProperties(&props,0); int nthreads = props.maxThreadsPerBlock; // get max threads int nblocks = props.maxGridSize[0]; // get max blocks if (batch_size<nthreads) nthreads = batch_size; if (npairs<nblocks) nblocks = npairs; hipLaunchKernelGGL(( multiply_baselines_kernel), dim3(nblocks), dim3(nthreads), 0, stream , out, in, pairs, batch_size, npairs); #if _GDEBUG check_error_stream( "mopsr_multiply_baselines", stream); #endif // sync and error check? 
return 0; } __global__ void static_delay_kernel(cuFloatComplex * in, float * out, float * out_errors, unsigned npairs, unsigned batch_size) { // Quick and dirty implementation // With some thought, this could be converted to a parallel reduction // each thread does the max calculation for batch_size points unsigned pair_idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned idx; unsigned max_pos=0; float max_val=0; float val; int x1_bin,x2_bin,x3_bin; float x1,x2,x3; float y1,y2,y3; cuFloatComplex tmp; float mean; float std; float sn; float variance; float sum = 0.0; float sq_sum = 0.0; double scaling; if (pair_idx < npairs) { idx = pair_idx * batch_size; tmp = in[idx]; scaling = tmp.x*tmp.x + tmp.y*tmp.y; for (int ii=0;ii<batch_size;ii++) { idx = pair_idx * batch_size + ii; tmp = in[idx]; val = tmp.x*tmp.x + tmp.y*tmp.y; val /= scaling; sum += val; sq_sum += val*val; if (val>max_val) { max_pos = ii; max_val = val; } } //float n = (float) batch_size-1; int n = batch_size-1; sum -= max_val; sq_sum -= max_val*max_val; mean = sum / n; variance = sq_sum/n - mean*mean; std = sqrt(variance); sn = (max_val-mean)/std; x1_bin = (max_pos-1)%batch_size; x2_bin = max_pos; x3_bin = (max_pos+1)%batch_size; x1 = max_pos-1; x2 = max_pos; x3 = max_pos+1; y1 = (float) cuCabsf(in[pair_idx * batch_size + x1_bin]); y2 = (float) cuCabsf(in[pair_idx * batch_size + x2_bin]); y3 = (float) cuCabsf(in[pair_idx * batch_size + x3_bin]); float denom = (x1 - x2) * (x1 - x3) * (x2 - x3); float A = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / denom; float B = (x3*x3 * (y1 - y2) + x2*x2 * (y3 - y1) + x1*x1 * (y2 - y3)) / denom; //float C = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / denom; //printf("thread %d: %.4f\n",threadIdx.x,-B / (2*A)); //float xv = -B / (2*A); float xv = fmodf((-B / (2*A) + batch_size/2), (float) batch_size) - batch_size/2; //yv = C - B*B / (4*A); //out[pair_idx] = fmodf((-batch_size/2 + xv) - batch_size/2,batch_size); out[pair_idx] = xv; out_errors[pair_idx] = sn; //out[pair_idx] = sq_sum/n; //out_errors[pair_idx] = mean*mean; } } int mopsr_static_delay(cuFloatComplex * in, float * out, float * out_errors, unsigned npairs, unsigned batch_size, hipStream_t stream) { struct hipDeviceProp_t props; hipGetDeviceProperties(&props,0); int nthreads = props.maxThreadsPerBlock; int nblocks = npairs/nthreads + 1; hipLaunchKernelGGL(( static_delay_kernel), dim3(nblocks),dim3(nthreads),0,stream, in, out, out_errors, npairs, batch_size); #if _GDEBUG check_error_stream( "static_delay_kernel", stream); #endif return 0; } __global__ void accumulate_bandpass_kernel(cuFloatComplex* in, cuFloatComplex* accumulator, unsigned nant, unsigned nsamps, unsigned batch_size) { // in data should be in ST order //each block sums all batches for a given antenna unsigned ant = blockIdx.x; unsigned offset = ant * nsamps; unsigned pos = threadIdx.x; unsigned out_idx = ant * batch_size + pos; cuFloatComplex val = make_cuFloatComplex(0.0,0.0); //cuFloatComplex inval; //float fft_scale = (float) batch_size; // loop over all time samples for given antenna for (int ii=pos; ii<nsamps; ii+=batch_size) { // loop over batch_size to account for the condition batch_size > blockDim.x for (int jj=0; jj<batch_size; jj+=blockDim.x) //inval = in[ offset + ii + jj]; //inval.x /= fft_scale; //inval.y /= fft_scale; //val = cuCaddf( val, inval); val = cuCaddf( val, in[ offset + ii + jj] ); } // add result to the accumulator array accumulator[out_idx] = cuCaddf(val, accumulator[out_idx]); } int 
mopsr_accumulate_bandpass(cuFloatComplex* in, cuFloatComplex* accumulator, unsigned nant, unsigned nsamps, unsigned batch_size, hipStream_t stream) { struct hipDeviceProp_t props; hipGetDeviceProperties(&props,0); int nthreads = props.maxThreadsPerBlock; if (batch_size < nthreads) nthreads = batch_size; #ifdef _GDEBUG fprintf (stderr, "mopsr_accumulate_bandpass: nant=%u, nsamps=%u, batch_size=%u\n", nant, nsamps, batch_size); fprintf (stderr, "mopsr_accumulate_bandpass: nblocks=%u, nthreads=%d\n", nant, nthreads); #endif hipLaunchKernelGGL(( accumulate_bandpass_kernel), dim3(nant), dim3(nthreads), 0, stream, in, accumulator, nant, nsamps, batch_size); #if _GDEBUG check_error_stream("accumulate_bandpass_kernel", stream); #endif return 0; } __global__ void byte_to_float_kernel (const char * input, float * output, uint64_t size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { unsigned ii; for (ii=idx; ii<size; ii+=gridDim.x*blockDim.x) { output[ii] = ((float) input[ii]) / 127.0; } } } // convert complex 8-bit input to 32-bit int mopsr_byte_to_float (int16_t * input, cuFloatComplex * output, unsigned nsamp, unsigned nant, unsigned nchan, hipStream_t stream) { struct hipDeviceProp_t props; hipGetDeviceProperties(&props, 0); int nthreads = props.maxThreadsPerBlock; const unsigned ndim = 2; uint64_t size = uint64_t(nsamp) * nant * nchan * ndim; int max_blocks = props.maxGridSize[0]; int nblocks; if (size/nthreads > max_blocks) nblocks = max_blocks; else nblocks = size/nthreads; #ifdef _GDEBUG fprintf (stderr, "mopsr_byte_to_float: nsamp=%lu nant=%u nchan=%u\n", nsamp, nant, nchan); fprintf (stderr, "mopsr_byte_to_float: blocks=(%d,%d,%d) nthreads=%d\n", blocks.x, blocks.y, blocks.z, nthreads); #endif hipLaunchKernelGGL(( byte_to_float_kernel), dim3(nblocks),dim3(nthreads),0,stream, (const char*) input, (float *)output, size); #if _GDEBUG check_error_stream("byte_to_float_kernel", stream); #endif return 0; } __global__ void transpose_TS_to_ST_kernel (const int16_t * input, cuFloatComplex * output, const uint64_t nsamp, const unsigned nant, const uint64_t nval, const unsigned nval_per_thread, const unsigned nsamp_per_block) { extern __shared__ int16_t sdata[]; const unsigned warp_num = threadIdx.x / WARP_SIZE; const unsigned warp_idx = threadIdx.x % WARP_SIZE; const unsigned offset = (warp_num * (WARP_SIZE * nval_per_thread)) + warp_idx; unsigned in_idx = (blockIdx.x * blockDim.x * nval_per_thread) + offset; unsigned sin_idx = offset; // for use in access each 8bit value int8_t * sdata8 = (int8_t *) sdata; unsigned ival; for (ival=0; ival<nval_per_thread; ival++) { if (in_idx < nval * nval_per_thread) sdata[sin_idx] = input[in_idx]; else sdata[sin_idx] = 0; in_idx += WARP_SIZE; sin_idx += WARP_SIZE; } __syncthreads(); // our thread number within the warp [0-32], also the time sample this will write each time const unsigned isamp = warp_idx; unsigned iant = warp_num * nval_per_thread; unsigned sout_idx = (isamp * nant) + iant; // block offset isamp warp offset thread offset uint64_t out_idx = (blockIdx.x * nsamp_per_block) + (nsamp * iant) + isamp; float imag, real; for (ival=0; ival<nval_per_thread; ival++) { imag = (float) sdata8[2*sout_idx]; real = (float) sdata8[2*sout_idx + 1]; //if (out_idx < nval * nval_per_thread) output[out_idx] = make_cuFloatComplex(real,imag); // update the output index out_idx += nsamp; sout_idx++; } } int mopsr_transpose_TS_to_ST(void * d_in, void * d_out, uint64_t nbytes, unsigned nant, hipStream_t stream) { const unsigned ndim = 2; struct 
hipDeviceProp_t props; hipGetDeviceProperties(&props,0); unsigned nthread = props.maxThreadsPerBlock; // since we want a warp of 32 threads to write out just 1 chunk const unsigned nsamp_per_block = WARP_SIZE; const unsigned nval_per_block = nsamp_per_block * nant; // special case where not a clean multiple [TODO validate this!] if (nval_per_block % nthread) { unsigned numerator = nval_per_block; while ( numerator > nthread ) numerator /= 2; nthread = numerator; } unsigned nval_per_thread = nval_per_block / nthread; const uint64_t nsamp = nbytes / (ndim * nant); // the total number of values we have to process is const uint64_t nval = nbytes / (ndim * nval_per_thread); int nblocks = nval / nthread; if (nval % nthread) nblocks++; const size_t sdata_bytes = nthread * ndim * nval_per_thread + (2 * nant); #ifdef _GDEBUG fprintf (stderr, "mopsr_transpose_TS_to_ST: nthread=%u\n", nthread); fprintf (stderr, "mopsr_transpose_TS_to_ST: nsamp=%lu, nant=%u, nval=%lu\n", nsamp, nant, nval); fprintf (stderr, "nsamp_per_block=%u nval_per_block=%u nval_per_thread=%u\n", nsamp_per_block, nval_per_block, nval_per_thread); fprintf (stderr, "nblocks=%d, sdata_bytes=%ld\n", nblocks, sdata_bytes); #endif hipLaunchKernelGGL(( transpose_TS_to_ST_kernel), dim3(nblocks),dim3(nthread),sdata_bytes,stream, (int16_t *) d_in, (cuFloatComplex *) d_out, nsamp, nant, nval, nval_per_thread, nsamp_per_block); #if _GDEBUG check_error_stream("transpose_TS_to_ST_kernel", stream); #endif return 0; } // Perform compute the SK estimator for each block of M samples, zapping them if // they exceed the threshold. __global__ void mopsr_skzap_kernel (cuFloatComplex * in, const uint64_t ndat, float M_fac, float sk_lower, float sk_upper) { extern __shared__ float sdata_sk[]; const unsigned M = blockDim.x; const unsigned i = blockIdx.x * blockDim.x + threadIdx.x; const unsigned s1 = (threadIdx.x*2); const unsigned s2 = (threadIdx.x*2) + 1; cuFloatComplex val; if (i < ndat) val = in[i]; const float power = (val.x * val.x) + (val.y * val.y); sdata_sk[s1] = power; sdata_sk[s2] = power * power; __syncthreads(); int last_offset = blockDim.x/2 + blockDim.x % 2; for (int offset = blockDim.x/2; offset > 0; offset >>= 1) { // add a partial sum upstream to our own if (threadIdx.x < offset) { sdata_sk[s1] += sdata_sk[s1 + (2*offset)]; sdata_sk[s2] += sdata_sk[s2 + (2*offset)]; } __syncthreads(); // special case for non power of 2 reductions if ((last_offset % 2) && (last_offset > 2) && (threadIdx.x == offset)) { sdata_sk[0] += sdata_sk[s1 + (2*offset)]; sdata_sk[1] += sdata_sk[s2 + (2*offset)]; } last_offset = offset; // wait until all threads in the block have updated their partial sums __syncthreads(); } // all threads read the S1 and S2 sums const float S1 = sdata_sk[0]; const float S2 = sdata_sk[1]; const float SK_estimate = M_fac * (M * (S2 / (S1 * S1)) - 1); if ((i < ndat) && ((SK_estimate > sk_upper) || (SK_estimate < sk_lower))) { in[i] = make_cuFloatComplex(0.0,0.0); } } // // Compute the S1 and S2 sums for blocks of input data, writing the S1 and S2 sums out to Gmem // __global__ void mopsr_skcompute_kernel (cuFloatComplex * in, cuFloatComplex * sums, const unsigned nval_per_thread, const uint64_t ndat, unsigned iant) { extern __shared__ float sdata_skc[]; unsigned idx = (blockIdx.x * blockDim.x + threadIdx.x) * nval_per_thread; const unsigned s1 = (threadIdx.x*2); const unsigned s2 = (threadIdx.x*2) + 1; cuFloatComplex val; float s1_sum = 0; float s2_sum = 0; float power; for (unsigned ival=0; ival<nval_per_thread; ival++) { if (idx < 
ndat) { val = in[idx]; //if ((iant == 3) && (blockIdx.x == 0)) // printf ("%u %f %f\n", idx, val.x, val.y); power = (val.x * val.x) + (val.y * val.y); s1_sum += power; s2_sum += (power * power); } idx += blockDim.x; } sdata_skc[s1] = s1_sum; sdata_skc[s2] = s2_sum; __syncthreads(); // This is a parallel reduction. On kepler+ cards this could be done better using // shuf_down(), but lets keep it generic for now int last_offset = blockDim.x/2 + blockDim.x % 2; for (int offset = blockDim.x/2; offset > 0; offset >>= 1) { // add a partial sum upstream to our own if (threadIdx.x < offset) { sdata_skc[s1] += sdata_skc[s1 + (2*offset)]; sdata_skc[s2] += sdata_skc[s2 + (2*offset)]; } __syncthreads(); // special case for non power of 2 reductions if ((last_offset % 2) && (last_offset > 2) && (threadIdx.x == offset)) { sdata_skc[0] += sdata_skc[s1 + (2*offset)]; sdata_skc[1] += sdata_skc[s2 + (2*offset)]; } last_offset = offset; // wait until all threads in the block have updated their partial sums __syncthreads(); } if (threadIdx.x == 0) { sums[blockIdx.x].x = sdata_skc[0]; sums[blockIdx.x].y = sdata_skc[1]; //if ((iant == 3) && (blockIdx.x == 0)) // printf ("SUM %f %f\n", sdata_skc[0], sdata_skc[1]); } } //__inline__ __device__ int warpReduceSum (int val) //{ // for (int offset = warpSize/2; offset > 0; offset /= 2) // val += __shfl_down(val, offset); // return val; //} // // take the S1 and S2 values in sums.x and sums.y that were computed from M samples, and integrate of nsums blocks to // compute a sk mask and zap // __global__ void mopsr_skmask_kernel (float * in, cuFloatComplex * sums, unsigned nsums, unsigned M, unsigned nval_per_thread, unsigned nsamp_per_thread, unsigned iant) { // Pearson Type IV SK limits for 3sigma RFI rejection, based on 2^index //const unsigned sk_idx_max = 20; const float sk_low[20] = { 0, 0, 0, 0, 0, 0.387702, 0.492078, 0.601904, 0.698159, 0.775046, 0.834186, 0.878879, 0.912209, 0.936770, 0.954684, 0.967644, 0.976961, 0.983628, 0.988382, 0.991764 }; const float sk_high[20] = { 0, 0, 0, 0, 0, 2.731480, 2.166000, 1.762970, 1.495970, 1.325420, 1.216950, 1.146930, 1.100750, 1.069730, 1.048570, 1.033980, 1.023850, 1.016780, 1.011820, 1.008340 }; /* // 4 sigma limits const float sk_low[20] = { 0, 0, 0, 0, 0, 0.274561, 0.363869, 0.492029, 0.613738, 0.711612, 0.786484, 0.843084, 0.885557, 0.917123, 0.940341, 0.957257, 0.969486, 0.978275, 0.984562, 0.989046 }; const float sk_high[20] = { 0, 0, 0, 0, 0, 4.27587, 3.11001, 2.29104, 1.784, 1.48684, 1.31218, 1.20603, 1.13893, 1.0951, 1.06577, 1.0458, 1.03204, 1.02249, 1.01582, 1.01115 }; */ // zap mask for each set of M samples extern __shared__ char smask[]; // initialize zap mask to 0 { unsigned idx = threadIdx.x; for (unsigned ival=0; ival<nval_per_thread; ival++) { if (idx < nsums) { smask[idx] = 0; idx += blockDim.x; } } } __syncthreads(); const unsigned log2_M = (unsigned) log2f (M); unsigned sk_idx_max = 20; unsigned idx = threadIdx.x; for (unsigned ival=0; ival<nval_per_thread; ival++) { if (idx < nsums) { for (unsigned sk_idx = log2_M; sk_idx < sk_idx_max; sk_idx ++) { unsigned powers_to_add = sk_idx - log2_M; unsigned to_add = (unsigned) exp2f(powers_to_add); //if ((iant == 0) && (threadIdx.x == 0)) //{ // printf ("[%d] sk_idx=%u powers_to_add=%u to_add=%u\n", ival, sk_idx, powers_to_add, to_add); //} //printf("to_add=%u\n", to_add); if (idx + to_add <= nsums) { //float s1 = sums[idx].x; //float s2 = sums[idx].y; const float m = M * to_add; const float m_fac = (m + 1) / (m - 1); //if ((iant == 0) && (threadIdx.x == 0)) 
// printf ("[%d] sums[%d] = (%f, %f)\n", ival, idx, s1, s2); float s1 = 0; float s2 = 0; for (unsigned ichunk=0; ichunk < to_add; ichunk++) { s1 += sums[idx + ichunk].x; s2 += sums[idx + ichunk].y; //if ((iant == 0) && (threadIdx.x == 0)) // printf ("[%d] sums[%d] = (%f, %f)\n", ival, idx+ichunk, sums[idx + ichunk].x, sums[idx + ichunk].y); } float sk_estimate = m_fac * (m * (s2 / (s1 * s1)) - 1); //if ((iant == 0) && (threadIdx.x == 0)) // printf ("[%d] total = (%f, %f), m=%f, sk=%f\n", ival, s1, s2, m, sk_estimate); //if (threadIdx.x == 0) //{ // printf ("ival=%d idx=%d s1=%e s2=%e sk=%f\n", ival, idx, s1, s2, sk_estimate); //} if ((sk_estimate < sk_low[sk_idx]) || (sk_estimate > sk_high[sk_idx])) { //if (iant == 0) // printf ("[%d][%d] s1=%e s2=%e sk_estimate=%e\n", // iant, idx, s1, s2, sk_estimate); for (unsigned ichunk=0; ichunk < to_add; ichunk++) { //if (iant == 0) // printf ("MASK: ant=%u block=%d\n", iant, idx+ichunk); smask[idx+ichunk] = 1; } } } } idx += blockDim.x; } } // sync here to be sure the smask is now updated __syncthreads(); // now we want to zap all blocks of input that have an associated mask // note that this kernel has only 1 block, with blockDim.x threads that may not match float * indat = in; nsamp_per_thread *= 2; for (unsigned isum=0; isum<nsums; isum++) { if (smask[isum] == 1) { //if ((iant == 0) && (threadIdx.x == 0)) // printf ("zapping chunk %d\n", isum); unsigned idx = threadIdx.x; for (unsigned isamp=0; isamp<nsamp_per_thread; isamp++) { if (idx < nsums) { indat[idx] = 0; idx += blockDim.x; } } } indat += 2 * M; } } // // relies on ST ordering of the data // void mopsr_skzap (float * in, uint64_t nbytes , unsigned nant, unsigned tscrunch, float sk_lower, float sk_upper, hipStream_t stream) { //printf ("mopsr_skzap (%p, %lu, %u, %u, %f, %f)\n", in, nbytes, nant, tscrunch, sk_lower, sk_upper); const float M = (float) tscrunch; const float M_fac = (M+1) / (M-1); const unsigned ndim = 2; uint64_t ndat = nbytes / (nant * ndim * sizeof(float)); unsigned block_size = tscrunch; uint64_t nblocks = ndat / block_size; uint64_t ndat_proc = nblocks * block_size; //printf ("mopsr_skzap: ndat=%u ndat_proc=%lu block_size=%u nblocks=%lu\n", ndat, ndat_proc, block_size, nblocks); size_t shm_bytes = block_size * ndim * sizeof(float); unsigned iant; float * indat = in; for (iant=0; iant<nant; iant++) { // foreach block reduce to S1, S2 sums [out of place] //printf ("mopsr_skzap: iant=%d offset=%u M=%f M_fac=%f\n", iant, (iant * ndat * ndim), M, M_fac); hipLaunchKernelGGL(( mopsr_skzap_kernel), dim3(nblocks),dim3(block_size),shm_bytes, stream, (cuFloatComplex *) indat, ndat_proc, M_fac, sk_lower, sk_upper); #ifdef _GDEBUG check_error_stream ("mopsr_skzap", stream); #endif indat += (ndat * ndim); } } // // relies on ST ordering of the data // void mopsr_skzap2 (float * in, void ** work_buffer, size_t * work_buffer_size, uint64_t nbytes , unsigned nant, unsigned tscrunch, hipStream_t stream) { #ifdef _GDEBUG fprintf (stderr, "mopsr_skzap2 (%p, %p, %ld, %lu, %u, %u)\n", in, *work_buffer, *work_buffer_size, nbytes, nant, tscrunch); #endif unsigned nthreads = 1024; unsigned nval_per_thread = 1; if (tscrunch > nthreads) nval_per_thread = tscrunch / nthreads; else nthreads = tscrunch; // each block is a single integration const unsigned ndim = 2; uint64_t ndat = nbytes / (nant * ndim * sizeof(float)); uint64_t nblocks = ndat / tscrunch; size_t shm_bytes = tscrunch * ndim * sizeof(float); size_t bytes_req = nblocks * 2 * sizeof(float); #ifdef _GDEBUG fprintf (stderr, "mopsr_skzap2: 
work_buffer_size=%ld bytes_req=%ld\n", *work_buffer_size, bytes_req); #endif if (*work_buffer_size < bytes_req) { if (*work_buffer != NULL) { #ifdef _GDEBUG fprintf (stderr, "freeing work_buffer\n"); #endif hipFree (*work_buffer); } hipMalloc (work_buffer, bytes_req); #ifdef _GDEBUG fprintf (stderr, "mopsr_skzap2: allocated %ld bytes, ptr=%p\n", bytes_req, *work_buffer); #endif *work_buffer_size = bytes_req; } #ifdef _GDEBUG fprintf (stderr, "ndat=%lu\n", ndat); #endif unsigned nthread_mask = 1024; unsigned nval_per_thread_mask = 1; if (nblocks > nthread_mask) { nval_per_thread_mask = nblocks / nthread_mask; if (nblocks % nthread_mask) nval_per_thread_mask++; } else nthread_mask = nblocks; unsigned shm_bytes_mask = nblocks; unsigned nsamp_per_thread_mask = tscrunch / nthread_mask; if (tscrunch % nthread_mask) nsamp_per_thread_mask++; unsigned iant; float * indat = in; for (iant=0; iant<nant; iant++) { // foreach block reduce to S1, S2 sums [out of place] #ifdef _GDEBUG fprintf (stderr, "nblocks=%u, nthreads=%u, shm_bytes=%u nval_per_thread=%u ndat=%u work_buffer=%p\n", nblocks, nthreads, shm_bytes, nval_per_thread, ndat, *work_buffer); #endif hipLaunchKernelGGL(( mopsr_skcompute_kernel), dim3(nblocks), dim3(nthreads), shm_bytes, stream, (cuFloatComplex *) indat, (cuFloatComplex *) *work_buffer, nval_per_thread, ndat, iant); #ifdef _GDEBUG check_error_stream ("mopsr_skcompute_kernel", stream); #endif #ifdef _GDEBUG fprintf (stderr, "nthread_mask=%u shm_bytes_mask=%u nval_per_thread_mask=%u nsamp_per_thread_mask=%u\n", nthread_mask, shm_bytes_mask, nval_per_thread_mask, nsamp_per_thread_mask); #endif hipLaunchKernelGGL(( mopsr_skmask_kernel), dim3(1), dim3(nthread_mask), shm_bytes_mask, stream, indat, (cuFloatComplex *) *work_buffer, nblocks, tscrunch, nval_per_thread_mask, nsamp_per_thread_mask, iant); #ifdef _GDEBUG check_error_stream ("mopsr_skmask_kernel", stream); #endif indat += (ndat * ndim); } }
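The delay estimate produced by static_delay_kernel comes from a three-point parabolic fit around the peak bin of the cross-power spectrum, with the vertex -B/(2A) then folded by fmodf so the reported lag is centred on zero. The helper below is only an illustrative host-side sketch of that interpolation step (the function name and standalone form are not part of this file), but the A and B coefficients mirror the ones computed in the kernel.

// Illustrative reference for the 3-point parabolic peak fit in static_delay_kernel.
// y1, y2, y3 are the magnitudes at bins x2-1, x2, x2+1, where x2 is the integer
// peak position; the return value is the sub-bin position of the maximum.
static float parabolic_peak(float x2, float y1, float y2, float y3)
{
    const float x1 = x2 - 1.0f;
    const float x3 = x2 + 1.0f;
    const float denom = (x1 - x2) * (x1 - x3) * (x2 - x3);
    // coefficients of the parabola y = A*x*x + B*x + C through the three samples
    const float A = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / denom;
    const float B = (x3 * x3 * (y1 - y2) + x2 * x2 * (y3 - y1) + x1 * x1 * (y2 - y3)) / denom;
    // vertex of the parabola
    return -B / (2.0f * A);
}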
7ced724f7ab6c0a5ed4c4ef8f2caace064a052ac.cu
/*************************************************************************** * * Copyright (C) 2014 by Andrew Jameson & Ewan Barr * Licensed under the Academic Free License version 2.1 * ****************************************************************************/ #include <cuda_runtime.h> #include <cuComplex.h> #include <inttypes.h> #include <stdio.h> #include "dada_cuda.h" #include "mopsr_calib_cuda.h" #define WARP_SIZE 32 //#define _GDEBUG 1 /* cuFloatComplex * in: Input data stream. Structured as freq/time/ant order (freq changing fastest) cuFloatComplex * out: Output data steam. Structured as freq/pair (freq changing fastest) mopsr_baseline_t * pairs: All pair combinations unsigned batch_size: Number of frequency channels unsigned nbatch: Number of time samples unsigned npairs: Number of pairs unsigned nsamps: batch_size * nbatch unsigned nread: Number of frequencies to read at a time into shared memory __global__ acc_cp_spectra_kernel(cuFloatComplex * in, cuFloatComplex * out, mopsr_baseline_t * pairs, unsigned batch_size, unsigned nbatch, unsigned npairs, unsigned nsamps, unsigned nread) { extern __shared__ cuFloatComplex shared []; // size of nread*nant int ii,jj,kk,ll; int freq_idx, ant_idx; int nsamps_by_nread = nsamps/nread; //Each block deals with nread samples from each antenna for (ii=blockIdx.x; ii<nsamps_by_nread; ii+=gridDim.x) { freq_idx = threadIdx.x % nread; //0-nread samp_idx = ii*nread; for (ant_idx = threadIdx.x/nread; ant_idx < nant; ant_idx += blockDim.x/nread) { } } int idx = blockIdx.x * blockDim.x + threadIdx.x; int freq_idx = threadIdx.x%batch_size; //0-batch_size }*/ __global__ void accumulate_cp_spectra_kernel(cuFloatComplex * in, cuFloatComplex * out, mopsr_baseline_t * pairs, unsigned batch_size, unsigned nbatch, unsigned npairs, unsigned nsamps, unsigned in_stride, unsigned out_stride) { mopsr_baseline_t pair; unsigned ii,jj,kk; cuFloatComplex val, val_a, val_b; unsigned pair_idx; unsigned pair_pos_a; unsigned pair_pos_b; unsigned bin_idx; unsigned out_idx; unsigned idx_a,idx_b; in += (blockIdx.y * in_stride); out += (blockIdx.y * out_stride); //loop over npairs (should only ever execute one loop) for (ii=0; ii<npairs; ii+=gridDim.x) { // each block operates on a single pair pair_idx = ii + blockIdx.x; pair = pairs[pair_idx]; pair_pos_a = pair.a * nsamps; pair_pos_b = pair.b * nsamps; // each thread operates on a bin in the CP spectrum //loop over each bin in the cross power spectrum for (kk=0; kk<batch_size; kk+=blockDim.x) { val = make_cuFloatComplex(0.0,0.0); bin_idx = threadIdx.x+kk; //loop over each batch in the fft for (jj=0; jj<nbatch; jj++) { idx_a = pair_pos_a + bin_idx + (jj * batch_size); idx_b = pair_pos_b + bin_idx + (jj * batch_size); val_a = in[idx_a]; val_b = in[idx_b]; val_a.x /= batch_size; val_a.y /= batch_size; val_b.x /= batch_size; val_b.y /= batch_size; val = cuCaddf(val, cuCmulf(cuConjf(val_a),val_b)); } out_idx = pair_idx*batch_size+bin_idx; out[out_idx] = cuCaddf(out[out_idx], val); } } } int mopsr_accumulate_cp_spectra(cuFloatComplex * in, cuFloatComplex * out, mopsr_baseline_t * pairs, unsigned batch_size, unsigned nbatch, unsigned npairs, unsigned nsamps, unsigned nchan, unsigned nant, cudaStream_t stream) { struct cudaDeviceProp props; cudaGetDeviceProperties(&props,0); int nthreads = props.maxThreadsPerBlock; dim3 blocks = dim3(props.maxGridSize[0], nchan, 1); if (batch_size<nthreads) nthreads = batch_size; if (npairs<blocks.x) blocks.x = npairs; // stride in bytes for each channel int in_stride = batch_size * nbatch * nant; 
int out_stride = batch_size * npairs; //fprintf (stderr, "blocks=(%d,%d,%d) nthreads=%d in_stride=%d out_stride=%d\n", blocks.x, blocks.y, blocks.z, nthreads, in_stride, out_stride); accumulate_cp_spectra_kernel<<< blocks, nthreads, 0, stream >>>(in, out, pairs, batch_size, nbatch, npairs, nsamps, in_stride, out_stride); #if _GDEBUG check_error_stream( "mopsr_accumulate_cp_spectra", stream); #endif return 0; } __global__ void multiply_baselines_kernel (cuFloatComplex * out, const cuFloatComplex * in, const mopsr_baseline_t * pairs, const unsigned batch_size, const unsigned npairs) { // element-wise multiplication of baseline pairs // primary component is multiplied by complex conjugate // of secondary component. mopsr_baseline_t pair; int ii,jj,out_idx,idx_a,idx_b,pair_idx; for (ii=0; ii<npairs; ii+=gridDim.x) { pair_idx = ii + blockIdx.x; if (pair_idx<npairs) { pair = pairs[pair_idx]; for (jj=0; jj<batch_size; jj+=blockDim.x) { idx_a = pair.a * batch_size + threadIdx.x + jj; idx_b = pair.b * batch_size + threadIdx.x + jj; out_idx = pair_idx * batch_size + threadIdx.x + jj; out[out_idx] = cuCmulf(in[idx_a],cuConjf(in[idx_b])); } } } } int mopsr_multiply_baselines(cuFloatComplex * out, cuFloatComplex * in, mopsr_baseline_t * pairs, unsigned batch_size, unsigned npairs, cudaStream_t stream) { struct cudaDeviceProp props; cudaGetDeviceProperties(&props,0); int nthreads = props.maxThreadsPerBlock; // get max threads int nblocks = props.maxGridSize[0]; // get max blocks if (batch_size<nthreads) nthreads = batch_size; if (npairs<nblocks) nblocks = npairs; multiply_baselines_kernel<<< nblocks, nthreads, 0, stream >>>(out, in, pairs, batch_size, npairs); #if _GDEBUG check_error_stream( "mopsr_multiply_baselines", stream); #endif // sync and error check? return 0; } __global__ void static_delay_kernel(cuFloatComplex * in, float * out, float * out_errors, unsigned npairs, unsigned batch_size) { // Quick and dirty implementation // With some thought, this could be converted to a parallel reduction // each thread does the max calculation for batch_size points unsigned pair_idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned idx; unsigned max_pos=0; float max_val=0; float val; int x1_bin,x2_bin,x3_bin; float x1,x2,x3; float y1,y2,y3; cuFloatComplex tmp; float mean; float std; float sn; float variance; float sum = 0.0; float sq_sum = 0.0; double scaling; if (pair_idx < npairs) { idx = pair_idx * batch_size; tmp = in[idx]; scaling = tmp.x*tmp.x + tmp.y*tmp.y; for (int ii=0;ii<batch_size;ii++) { idx = pair_idx * batch_size + ii; tmp = in[idx]; val = tmp.x*tmp.x + tmp.y*tmp.y; val /= scaling; sum += val; sq_sum += val*val; if (val>max_val) { max_pos = ii; max_val = val; } } //float n = (float) batch_size-1; int n = batch_size-1; sum -= max_val; sq_sum -= max_val*max_val; mean = sum / n; variance = sq_sum/n - mean*mean; std = sqrt(variance); sn = (max_val-mean)/std; x1_bin = (max_pos-1)%batch_size; x2_bin = max_pos; x3_bin = (max_pos+1)%batch_size; x1 = max_pos-1; x2 = max_pos; x3 = max_pos+1; y1 = (float) cuCabsf(in[pair_idx * batch_size + x1_bin]); y2 = (float) cuCabsf(in[pair_idx * batch_size + x2_bin]); y3 = (float) cuCabsf(in[pair_idx * batch_size + x3_bin]); float denom = (x1 - x2) * (x1 - x3) * (x2 - x3); float A = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / denom; float B = (x3*x3 * (y1 - y2) + x2*x2 * (y3 - y1) + x1*x1 * (y2 - y3)) / denom; //float C = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / denom; //printf("thread %d: %.4f\n",threadIdx.x,-B / 
(2*A)); //float xv = -B / (2*A); float xv = fmodf((-B / (2*A) + batch_size/2), (float) batch_size) - batch_size/2; //yv = C - B*B / (4*A); //out[pair_idx] = fmodf((-batch_size/2 + xv) - batch_size/2,batch_size); out[pair_idx] = xv; out_errors[pair_idx] = sn; //out[pair_idx] = sq_sum/n; //out_errors[pair_idx] = mean*mean; } } int mopsr_static_delay(cuFloatComplex * in, float * out, float * out_errors, unsigned npairs, unsigned batch_size, cudaStream_t stream) { struct cudaDeviceProp props; cudaGetDeviceProperties(&props,0); int nthreads = props.maxThreadsPerBlock; int nblocks = npairs/nthreads + 1; static_delay_kernel<<<nblocks,nthreads,0,stream>>>(in, out, out_errors, npairs, batch_size); #if _GDEBUG check_error_stream( "static_delay_kernel", stream); #endif return 0; } __global__ void accumulate_bandpass_kernel(cuFloatComplex* in, cuFloatComplex* accumulator, unsigned nant, unsigned nsamps, unsigned batch_size) { // in data should be in ST order //each block sums all batches for a given antenna unsigned ant = blockIdx.x; unsigned offset = ant * nsamps; unsigned pos = threadIdx.x; unsigned out_idx = ant * batch_size + pos; cuFloatComplex val = make_cuFloatComplex(0.0,0.0); //cuFloatComplex inval; //float fft_scale = (float) batch_size; // loop over all time samples for given antenna for (int ii=pos; ii<nsamps; ii+=batch_size) { // loop over batch_size to account for the condition batch_size > blockDim.x for (int jj=0; jj<batch_size; jj+=blockDim.x) //inval = in[ offset + ii + jj]; //inval.x /= fft_scale; //inval.y /= fft_scale; //val = cuCaddf( val, inval); val = cuCaddf( val, in[ offset + ii + jj] ); } // add result to the accumulator array accumulator[out_idx] = cuCaddf(val, accumulator[out_idx]); } int mopsr_accumulate_bandpass(cuFloatComplex* in, cuFloatComplex* accumulator, unsigned nant, unsigned nsamps, unsigned batch_size, cudaStream_t stream) { struct cudaDeviceProp props; cudaGetDeviceProperties(&props,0); int nthreads = props.maxThreadsPerBlock; if (batch_size < nthreads) nthreads = batch_size; #ifdef _GDEBUG fprintf (stderr, "mopsr_accumulate_bandpass: nant=%u, nsamps=%u, batch_size=%u\n", nant, nsamps, batch_size); fprintf (stderr, "mopsr_accumulate_bandpass: nblocks=%u, nthreads=%d\n", nant, nthreads); #endif accumulate_bandpass_kernel<<< nant, nthreads, 0, stream>>>(in, accumulator, nant, nsamps, batch_size); #if _GDEBUG check_error_stream("accumulate_bandpass_kernel", stream); #endif return 0; } __global__ void byte_to_float_kernel (const char * input, float * output, uint64_t size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { unsigned ii; for (ii=idx; ii<size; ii+=gridDim.x*blockDim.x) { output[ii] = ((float) input[ii]) / 127.0; } } } // convert complex 8-bit input to 32-bit int mopsr_byte_to_float (int16_t * input, cuFloatComplex * output, unsigned nsamp, unsigned nant, unsigned nchan, cudaStream_t stream) { struct cudaDeviceProp props; cudaGetDeviceProperties(&props, 0); int nthreads = props.maxThreadsPerBlock; const unsigned ndim = 2; uint64_t size = uint64_t(nsamp) * nant * nchan * ndim; int max_blocks = props.maxGridSize[0]; int nblocks; if (size/nthreads > max_blocks) nblocks = max_blocks; else nblocks = size/nthreads; #ifdef _GDEBUG fprintf (stderr, "mopsr_byte_to_float: nsamp=%lu nant=%u nchan=%u\n", nsamp, nant, nchan); fprintf (stderr, "mopsr_byte_to_float: blocks=(%d,%d,%d) nthreads=%d\n", blocks.x, blocks.y, blocks.z, nthreads); #endif byte_to_float_kernel<<<nblocks,nthreads,0,stream>>>((const char*) input, (float *)output, size); #if 
_GDEBUG check_error_stream("byte_to_float_kernel", stream); #endif return 0; } __global__ void transpose_TS_to_ST_kernel (const int16_t * input, cuFloatComplex * output, const uint64_t nsamp, const unsigned nant, const uint64_t nval, const unsigned nval_per_thread, const unsigned nsamp_per_block) { extern __shared__ int16_t sdata[]; const unsigned warp_num = threadIdx.x / WARP_SIZE; const unsigned warp_idx = threadIdx.x % WARP_SIZE; const unsigned offset = (warp_num * (WARP_SIZE * nval_per_thread)) + warp_idx; unsigned in_idx = (blockIdx.x * blockDim.x * nval_per_thread) + offset; unsigned sin_idx = offset; // for use in access each 8bit value int8_t * sdata8 = (int8_t *) sdata; unsigned ival; for (ival=0; ival<nval_per_thread; ival++) { if (in_idx < nval * nval_per_thread) sdata[sin_idx] = input[in_idx]; else sdata[sin_idx] = 0; in_idx += WARP_SIZE; sin_idx += WARP_SIZE; } __syncthreads(); // our thread number within the warp [0-32], also the time sample this will write each time const unsigned isamp = warp_idx; unsigned iant = warp_num * nval_per_thread; unsigned sout_idx = (isamp * nant) + iant; // block offset isamp warp offset thread offset uint64_t out_idx = (blockIdx.x * nsamp_per_block) + (nsamp * iant) + isamp; float imag, real; for (ival=0; ival<nval_per_thread; ival++) { imag = (float) sdata8[2*sout_idx]; real = (float) sdata8[2*sout_idx + 1]; //if (out_idx < nval * nval_per_thread) output[out_idx] = make_cuFloatComplex(real,imag); // update the output index out_idx += nsamp; sout_idx++; } } int mopsr_transpose_TS_to_ST(void * d_in, void * d_out, uint64_t nbytes, unsigned nant, cudaStream_t stream) { const unsigned ndim = 2; struct cudaDeviceProp props; cudaGetDeviceProperties(&props,0); unsigned nthread = props.maxThreadsPerBlock; // since we want a warp of 32 threads to write out just 1 chunk const unsigned nsamp_per_block = WARP_SIZE; const unsigned nval_per_block = nsamp_per_block * nant; // special case where not a clean multiple [TODO validate this!] if (nval_per_block % nthread) { unsigned numerator = nval_per_block; while ( numerator > nthread ) numerator /= 2; nthread = numerator; } unsigned nval_per_thread = nval_per_block / nthread; const uint64_t nsamp = nbytes / (ndim * nant); // the total number of values we have to process is const uint64_t nval = nbytes / (ndim * nval_per_thread); int nblocks = nval / nthread; if (nval % nthread) nblocks++; const size_t sdata_bytes = nthread * ndim * nval_per_thread + (2 * nant); #ifdef _GDEBUG fprintf (stderr, "mopsr_transpose_TS_to_ST: nthread=%u\n", nthread); fprintf (stderr, "mopsr_transpose_TS_to_ST: nsamp=%lu, nant=%u, nval=%lu\n", nsamp, nant, nval); fprintf (stderr, "nsamp_per_block=%u nval_per_block=%u nval_per_thread=%u\n", nsamp_per_block, nval_per_block, nval_per_thread); fprintf (stderr, "nblocks=%d, sdata_bytes=%ld\n", nblocks, sdata_bytes); #endif transpose_TS_to_ST_kernel<<<nblocks,nthread,sdata_bytes,stream>>> ((int16_t *) d_in, (cuFloatComplex *) d_out, nsamp, nant, nval, nval_per_thread, nsamp_per_block); #if _GDEBUG check_error_stream("transpose_TS_to_ST_kernel", stream); #endif return 0; } // Perform compute the SK estimator for each block of M samples, zapping them if // they exceed the threshold. 
__global__ void mopsr_skzap_kernel (cuFloatComplex * in, const uint64_t ndat, float M_fac, float sk_lower, float sk_upper) { extern __shared__ float sdata_sk[]; const unsigned M = blockDim.x; const unsigned i = blockIdx.x * blockDim.x + threadIdx.x; const unsigned s1 = (threadIdx.x*2); const unsigned s2 = (threadIdx.x*2) + 1; cuFloatComplex val; if (i < ndat) val = in[i]; const float power = (val.x * val.x) + (val.y * val.y); sdata_sk[s1] = power; sdata_sk[s2] = power * power; __syncthreads(); int last_offset = blockDim.x/2 + blockDim.x % 2; for (int offset = blockDim.x/2; offset > 0; offset >>= 1) { // add a partial sum upstream to our own if (threadIdx.x < offset) { sdata_sk[s1] += sdata_sk[s1 + (2*offset)]; sdata_sk[s2] += sdata_sk[s2 + (2*offset)]; } __syncthreads(); // special case for non power of 2 reductions if ((last_offset % 2) && (last_offset > 2) && (threadIdx.x == offset)) { sdata_sk[0] += sdata_sk[s1 + (2*offset)]; sdata_sk[1] += sdata_sk[s2 + (2*offset)]; } last_offset = offset; // wait until all threads in the block have updated their partial sums __syncthreads(); } // all threads read the S1 and S2 sums const float S1 = sdata_sk[0]; const float S2 = sdata_sk[1]; const float SK_estimate = M_fac * (M * (S2 / (S1 * S1)) - 1); if ((i < ndat) && ((SK_estimate > sk_upper) || (SK_estimate < sk_lower))) { in[i] = make_cuFloatComplex(0.0,0.0); } } // // Compute the S1 and S2 sums for blocks of input data, writing the S1 and S2 sums out to Gmem // __global__ void mopsr_skcompute_kernel (cuFloatComplex * in, cuFloatComplex * sums, const unsigned nval_per_thread, const uint64_t ndat, unsigned iant) { extern __shared__ float sdata_skc[]; unsigned idx = (blockIdx.x * blockDim.x + threadIdx.x) * nval_per_thread; const unsigned s1 = (threadIdx.x*2); const unsigned s2 = (threadIdx.x*2) + 1; cuFloatComplex val; float s1_sum = 0; float s2_sum = 0; float power; for (unsigned ival=0; ival<nval_per_thread; ival++) { if (idx < ndat) { val = in[idx]; //if ((iant == 3) && (blockIdx.x == 0)) // printf ("%u %f %f\n", idx, val.x, val.y); power = (val.x * val.x) + (val.y * val.y); s1_sum += power; s2_sum += (power * power); } idx += blockDim.x; } sdata_skc[s1] = s1_sum; sdata_skc[s2] = s2_sum; __syncthreads(); // This is a parallel reduction. 
On kepler+ cards this could be done better using // shuf_down(), but lets keep it generic for now int last_offset = blockDim.x/2 + blockDim.x % 2; for (int offset = blockDim.x/2; offset > 0; offset >>= 1) { // add a partial sum upstream to our own if (threadIdx.x < offset) { sdata_skc[s1] += sdata_skc[s1 + (2*offset)]; sdata_skc[s2] += sdata_skc[s2 + (2*offset)]; } __syncthreads(); // special case for non power of 2 reductions if ((last_offset % 2) && (last_offset > 2) && (threadIdx.x == offset)) { sdata_skc[0] += sdata_skc[s1 + (2*offset)]; sdata_skc[1] += sdata_skc[s2 + (2*offset)]; } last_offset = offset; // wait until all threads in the block have updated their partial sums __syncthreads(); } if (threadIdx.x == 0) { sums[blockIdx.x].x = sdata_skc[0]; sums[blockIdx.x].y = sdata_skc[1]; //if ((iant == 3) && (blockIdx.x == 0)) // printf ("SUM %f %f\n", sdata_skc[0], sdata_skc[1]); } } //__inline__ __device__ int warpReduceSum (int val) //{ // for (int offset = warpSize/2; offset > 0; offset /= 2) // val += __shfl_down(val, offset); // return val; //} // // take the S1 and S2 values in sums.x and sums.y that were computed from M samples, and integrate of nsums blocks to // compute a sk mask and zap // __global__ void mopsr_skmask_kernel (float * in, cuFloatComplex * sums, unsigned nsums, unsigned M, unsigned nval_per_thread, unsigned nsamp_per_thread, unsigned iant) { // Pearson Type IV SK limits for 3sigma RFI rejection, based on 2^index //const unsigned sk_idx_max = 20; const float sk_low[20] = { 0, 0, 0, 0, 0, 0.387702, 0.492078, 0.601904, 0.698159, 0.775046, 0.834186, 0.878879, 0.912209, 0.936770, 0.954684, 0.967644, 0.976961, 0.983628, 0.988382, 0.991764 }; const float sk_high[20] = { 0, 0, 0, 0, 0, 2.731480, 2.166000, 1.762970, 1.495970, 1.325420, 1.216950, 1.146930, 1.100750, 1.069730, 1.048570, 1.033980, 1.023850, 1.016780, 1.011820, 1.008340 }; /* // 4 sigma limits const float sk_low[20] = { 0, 0, 0, 0, 0, 0.274561, 0.363869, 0.492029, 0.613738, 0.711612, 0.786484, 0.843084, 0.885557, 0.917123, 0.940341, 0.957257, 0.969486, 0.978275, 0.984562, 0.989046 }; const float sk_high[20] = { 0, 0, 0, 0, 0, 4.27587, 3.11001, 2.29104, 1.784, 1.48684, 1.31218, 1.20603, 1.13893, 1.0951, 1.06577, 1.0458, 1.03204, 1.02249, 1.01582, 1.01115 }; */ // zap mask for each set of M samples extern __shared__ char smask[]; // initialize zap mask to 0 { unsigned idx = threadIdx.x; for (unsigned ival=0; ival<nval_per_thread; ival++) { if (idx < nsums) { smask[idx] = 0; idx += blockDim.x; } } } __syncthreads(); const unsigned log2_M = (unsigned) log2f (M); unsigned sk_idx_max = 20; unsigned idx = threadIdx.x; for (unsigned ival=0; ival<nval_per_thread; ival++) { if (idx < nsums) { for (unsigned sk_idx = log2_M; sk_idx < sk_idx_max; sk_idx ++) { unsigned powers_to_add = sk_idx - log2_M; unsigned to_add = (unsigned) exp2f(powers_to_add); //if ((iant == 0) && (threadIdx.x == 0)) //{ // printf ("[%d] sk_idx=%u powers_to_add=%u to_add=%u\n", ival, sk_idx, powers_to_add, to_add); //} //printf("to_add=%u\n", to_add); if (idx + to_add <= nsums) { //float s1 = sums[idx].x; //float s2 = sums[idx].y; const float m = M * to_add; const float m_fac = (m + 1) / (m - 1); //if ((iant == 0) && (threadIdx.x == 0)) // printf ("[%d] sums[%d] = (%f, %f)\n", ival, idx, s1, s2); float s1 = 0; float s2 = 0; for (unsigned ichunk=0; ichunk < to_add; ichunk++) { s1 += sums[idx + ichunk].x; s2 += sums[idx + ichunk].y; //if ((iant == 0) && (threadIdx.x == 0)) // printf ("[%d] sums[%d] = (%f, %f)\n", ival, idx+ichunk, sums[idx + 
ichunk].x, sums[idx + ichunk].y); } float sk_estimate = m_fac * (m * (s2 / (s1 * s1)) - 1); //if ((iant == 0) && (threadIdx.x == 0)) // printf ("[%d] total = (%f, %f), m=%f, sk=%f\n", ival, s1, s2, m, sk_estimate); //if (threadIdx.x == 0) //{ // printf ("ival=%d idx=%d s1=%e s2=%e sk=%f\n", ival, idx, s1, s2, sk_estimate); //} if ((sk_estimate < sk_low[sk_idx]) || (sk_estimate > sk_high[sk_idx])) { //if (iant == 0) // printf ("[%d][%d] s1=%e s2=%e sk_estimate=%e\n", // iant, idx, s1, s2, sk_estimate); for (unsigned ichunk=0; ichunk < to_add; ichunk++) { //if (iant == 0) // printf ("MASK: ant=%u block=%d\n", iant, idx+ichunk); smask[idx+ichunk] = 1; } } } } idx += blockDim.x; } } // sync here to be sure the smask is now updated __syncthreads(); // now we want to zap all blocks of input that have an associated mask // note that this kernel has only 1 block, with blockDim.x threads that may not match float * indat = in; nsamp_per_thread *= 2; for (unsigned isum=0; isum<nsums; isum++) { if (smask[isum] == 1) { //if ((iant == 0) && (threadIdx.x == 0)) // printf ("zapping chunk %d\n", isum); unsigned idx = threadIdx.x; for (unsigned isamp=0; isamp<nsamp_per_thread; isamp++) { if (idx < nsums) { indat[idx] = 0; idx += blockDim.x; } } } indat += 2 * M; } } // // relies on ST ordering of the data // void mopsr_skzap (float * in, uint64_t nbytes , unsigned nant, unsigned tscrunch, float sk_lower, float sk_upper, cudaStream_t stream) { //printf ("mopsr_skzap (%p, %lu, %u, %u, %f, %f)\n", in, nbytes, nant, tscrunch, sk_lower, sk_upper); const float M = (float) tscrunch; const float M_fac = (M+1) / (M-1); const unsigned ndim = 2; uint64_t ndat = nbytes / (nant * ndim * sizeof(float)); unsigned block_size = tscrunch; uint64_t nblocks = ndat / block_size; uint64_t ndat_proc = nblocks * block_size; //printf ("mopsr_skzap: ndat=%u ndat_proc=%lu block_size=%u nblocks=%lu\n", ndat, ndat_proc, block_size, nblocks); size_t shm_bytes = block_size * ndim * sizeof(float); unsigned iant; float * indat = in; for (iant=0; iant<nant; iant++) { // foreach block reduce to S1, S2 sums [out of place] //printf ("mopsr_skzap: iant=%d offset=%u M=%f M_fac=%f\n", iant, (iant * ndat * ndim), M, M_fac); mopsr_skzap_kernel<<<nblocks,block_size,shm_bytes, stream>>> ((cuFloatComplex *) indat, ndat_proc, M_fac, sk_lower, sk_upper); #ifdef _GDEBUG check_error_stream ("mopsr_skzap", stream); #endif indat += (ndat * ndim); } } // // relies on ST ordering of the data // void mopsr_skzap2 (float * in, void ** work_buffer, size_t * work_buffer_size, uint64_t nbytes , unsigned nant, unsigned tscrunch, cudaStream_t stream) { #ifdef _GDEBUG fprintf (stderr, "mopsr_skzap2 (%p, %p, %ld, %lu, %u, %u)\n", in, *work_buffer, *work_buffer_size, nbytes, nant, tscrunch); #endif unsigned nthreads = 1024; unsigned nval_per_thread = 1; if (tscrunch > nthreads) nval_per_thread = tscrunch / nthreads; else nthreads = tscrunch; // each block is a single integration const unsigned ndim = 2; uint64_t ndat = nbytes / (nant * ndim * sizeof(float)); uint64_t nblocks = ndat / tscrunch; size_t shm_bytes = tscrunch * ndim * sizeof(float); size_t bytes_req = nblocks * 2 * sizeof(float); #ifdef _GDEBUG fprintf (stderr, "mopsr_skzap2: work_buffer_size=%ld bytes_req=%ld\n", *work_buffer_size, bytes_req); #endif if (*work_buffer_size < bytes_req) { if (*work_buffer != NULL) { #ifdef _GDEBUG fprintf (stderr, "freeing work_buffer\n"); #endif cudaFree (*work_buffer); } cudaMalloc (work_buffer, bytes_req); #ifdef _GDEBUG fprintf (stderr, "mopsr_skzap2: allocated %ld 
bytes, ptr=%p\n", bytes_req, *work_buffer); #endif *work_buffer_size = bytes_req; } #ifdef _GDEBUG fprintf (stderr, "ndat=%lu\n", ndat); #endif unsigned nthread_mask = 1024; unsigned nval_per_thread_mask = 1; if (nblocks > nthread_mask) { nval_per_thread_mask = nblocks / nthread_mask; if (nblocks % nthread_mask) nval_per_thread_mask++; } else nthread_mask = nblocks; unsigned shm_bytes_mask = nblocks; unsigned nsamp_per_thread_mask = tscrunch / nthread_mask; if (tscrunch % nthread_mask) nsamp_per_thread_mask++; unsigned iant; float * indat = in; for (iant=0; iant<nant; iant++) { // foreach block reduce to S1, S2 sums [out of place] #ifdef _GDEBUG fprintf (stderr, "nblocks=%u, nthreads=%u, shm_bytes=%u nval_per_thread=%u ndat=%u work_buffer=%p\n", nblocks, nthreads, shm_bytes, nval_per_thread, ndat, *work_buffer); #endif mopsr_skcompute_kernel<<<nblocks, nthreads, shm_bytes, stream>>>((cuFloatComplex *) indat, (cuFloatComplex *) *work_buffer, nval_per_thread, ndat, iant); #ifdef _GDEBUG check_error_stream ("mopsr_skcompute_kernel", stream); #endif #ifdef _GDEBUG fprintf (stderr, "nthread_mask=%u shm_bytes_mask=%u nval_per_thread_mask=%u nsamp_per_thread_mask=%u\n", nthread_mask, shm_bytes_mask, nval_per_thread_mask, nsamp_per_thread_mask); #endif mopsr_skmask_kernel<<<1, nthread_mask, shm_bytes_mask, stream>>>(indat, (cuFloatComplex *) *work_buffer, nblocks, tscrunch, nval_per_thread_mask, nsamp_per_thread_mask, iant); #ifdef _GDEBUG check_error_stream ("mopsr_skmask_kernel", stream); #endif indat += (ndat * ndim); } }
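mopsr_skcompute_kernel reduces each block of M samples to S1 (the sum of detected powers) and S2 (the sum of squared powers), and mopsr_skmask_kernel converts those sums into the generalised spectral-kurtosis estimator that is tested against the tabulated 3-sigma limits. The plain-C helper below is a hedged reference only — the function name and standalone form are not part of the library — but it computes the same quantity as the kernels:

#include <cuComplex.h>

// SK estimator for one block of M complex samples, as used by the mopsr_sk* kernels:
// SK = ((M+1)/(M-1)) * (M * S2 / (S1*S1) - 1).  For Gaussian noise SK is close to 1;
// values outside the tabulated limits flag the block as RFI.
static float sk_estimate_block(const cuFloatComplex * in, unsigned M)
{
    float s1 = 0.0f, s2 = 0.0f;
    for (unsigned i = 0; i < M; i++)
    {
        const float p = in[i].x * in[i].x + in[i].y * in[i].y;
        s1 += p;
        s2 += p * p;
    }
    const float m_fac = (M + 1.0f) / (M - 1.0f);
    return m_fac * (M * (s2 / (s1 * s1)) - 1.0f);
}

mopsr_skmask_kernel additionally sums S1 and S2 over runs of 2^k adjacent blocks before applying the test, which is why the limit tables are indexed by log2 of the effective M.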
9eb55cfcf78465707774ca915eb7fc46c69f8651.hip
// !!! This is a file automatically generated by hipify!!!
#include"pub.h"
#include"pub_main.h"
#include"input.h"
using namespace GS_NS;
using namespace std;
int main(int argn,char* args[]){
    string file;
    /////////////////////////////////////////////////////////////
    if (argn==1){
        file ="in.gs";
        ///////////////
        GV<0>::LogAndError.Init(file);
        //////////////
        GV<0>::LogAndError<<"Since no input script assigned, default \"in.gs\" is used.\n";
    }else{
        file = args[1];
        /////////////
        GV<0>::LogAndError.Init(file);
        ////////////
        GV<0>::LogAndError<<"Input script \""<<file<<"\" is used.\n";
        int device=0;
        if (argn==3) {
            io(args[2],device);
            hipSetDevice(device);
            GV<0>::LogAndError<<"Gpu device set to "<<device<<"\n";
        }
    }
    /////////////////////////////////////////////////////////////
    ifstream in(file.c_str(), ios::in);
    if (in.fail()){
        GV<0>::LogAndError<<"Input script "<<file<<" is not found\n";
        return -1;
    }
    istreambuf_iterator<char> beg(in), end;
    string script(beg, end);
    in.close();
    /////////////////////////////////////////////////////////////
    INPUT qin;
    // qin.standardize(script);
    qin.Phrasing(script);
    return 0;
}
9eb55cfcf78465707774ca915eb7fc46c69f8651.cu
#include"pub.h"
#include"pub_main.h"
#include"input.h"
using namespace GS_NS;
using namespace std;
int main(int argn,char* args[]){
    string file;
    /////////////////////////////////////////////////////////////
    if (argn==1){
        file ="in.gs";
        ///////////////
        GV<0>::LogAndError.Init(file);
        //////////////
        GV<0>::LogAndError<<"Since no input script assigned, default \"in.gs\" is used.\n";
    }else{
        file = args[1];
        /////////////
        GV<0>::LogAndError.Init(file);
        ////////////
        GV<0>::LogAndError<<"Input script \""<<file<<"\" is used.\n";
        int device=0;
        if (argn==3) {
            io(args[2],device);
            cudaSetDevice(device);
            GV<0>::LogAndError<<"Gpu device set to "<<device<<"\n";
        }
    }
    /////////////////////////////////////////////////////////////
    ifstream in(file.c_str(), ios::in);
    if (in.fail()){
        GV<0>::LogAndError<<"Input script "<<file<<" is not found\n";
        return -1;
    }
    istreambuf_iterator<char> beg(in), end;
    string script(beg, end);
    in.close();
    /////////////////////////////////////////////////////////////
    INPUT qin;
    // qin.standardize(script);
    qin.Phrasing(script);
    return 0;
}
1cb3c05bb7647c8baaab9bbdeb6e57d1cdf8a41a.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hipfft.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void DianCheng(hipfftDoubleComplex *a, hipfftDoubleComplex *b, hipfftDoubleComplex *c,int M, int L)// GPU kernel for element-wise (dot) complex multiplication
{
    int tx = threadIdx.x;
    int by = blockIdx.y;
    int i=by*L+tx;
    if (i<=M*L)
    {
        c[i].x = a[i].x * b[i].x-a[i].y*b[i].y;
        c[i].y = a[i].x * b[i].y+a[i].y*b[i].x;
    }
}
1cb3c05bb7647c8baaab9bbdeb6e57d1cdf8a41a.cu
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cufft.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void DianCheng(cufftDoubleComplex *a, cufftDoubleComplex *b, cufftDoubleComplex *c,int M, int L)// GPU kernel for element-wise (dot) complex multiplication
{
    int tx = threadIdx.x;
    int by = blockIdx.y;
    int i=by*L+tx;
    if (i<=M*L)
    {
        c[i].x = a[i].x * b[i].x-a[i].y*b[i].y;
        c[i].y = a[i].x * b[i].y+a[i].y*b[i].x;
    }
}
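DianCheng is an element-wise complex multiply, c[i] = a[i]*b[i], over an M-by-L array addressed as i = blockIdx.y*L + threadIdx.x. The original host code is not part of this file, so the launch wrapper below is only a guess at the intended configuration (one block of L threads per row and M blocks along y, which requires L <= 1024); the helper name is hypothetical. With that geometry the largest index is M*L - 1, so the kernel's i <= M*L guard never goes out of bounds.

#include <cufft.h>

// Hypothetical launch for the DianCheng kernel defined above.
// d_a, d_b, d_c are device buffers holding M*L cufftDoubleComplex values.
void launch_diancheng(cufftDoubleComplex *d_a, cufftDoubleComplex *d_b,
                      cufftDoubleComplex *d_c, int M, int L)
{
    dim3 block(L, 1, 1);  // threadIdx.x indexes the column within a row
    dim3 grid(1, M, 1);   // blockIdx.y indexes the row
    DianCheng<<<grid, block>>>(d_a, d_b, d_c, M, L);
}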
db6c0dc4ef9d9ed03996113ef4c441411cb037f7.hip
// !!! This is a file automatically generated by hipify!!! /** CUDA ([email protected]) $ nvcc -O3 CUDA**_N-Queen.cu && ./a.out (-c|-r|-g) -c:cpu -r cpu -g GPU $ nvcc -O3 CUDA13_N-Queen.cu && ./a.out -g GPU CUDA N: Total Unique dd:hh:mm:ss.ms 4: 2 1 00:00:00:00.37 5: 10 2 00:00:00:00.00 6: 4 1 00:00:00:00.00 7: 40 6 00:00:00:00.00 8: 92 12 00:00:00:00.01 9: 352 46 00:00:00:00.01 10: 724 92 00:00:00:00.01 11: 2680 341 00:00:00:00.01 12: 14200 1787 00:00:00:00.02 13: 73712 9233 00:00:00:00.03 14: 365596 45752 00:00:00:00.03 15: 2279184 285053 00:00:00:00.04 16: 14772512 1846955 00:00:00:00.08 17: 95815104 11977939 00:00:00:00.35 18: 666090624 83263591 00:00:00:02.60 19: 4968057848 621012754 00:00:00:22.23 20: 39029188884 4878666808 00:00:03:26.80 21: 314666222712 39333324973 00:00:33:09.52 */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <time.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <pthread.h> #define THREAD_NUM 96 #define MAX 27 // /** CPU/CPUR non-recursive/recursive 0 1 */ int NR; /** */ long TOTAL=0; long UNIQUE=0; // // // pthread // typedef struct { int size; int sizeE; long lTOTAL,lUNIQUE; }GCLASS, *GClass; GCLASS G; // // typedef struct{ int BOUND1,BOUND2,TOPBIT,ENDBIT,SIDEMASK,LASTMASK; int mask; int aBoard[MAX]; long COUNT2[MAX],COUNT4[MAX],COUNT8[MAX]; }local ; // __device__ __host__ int symmetryOps(int si,unsigned int *d_aBoard,int BOUND1,int BOUND2,int TOPBIT,int ENDBIT) { int own,ptn,you,bit; //90 if(d_aBoard[BOUND2]==1){ own=1; ptn=2; while(own<=si-1){ bit=1; you=si-1; while((d_aBoard[you]!=ptn)&&(d_aBoard[own]>=bit)){ bit<<=1; you--; } if(d_aBoard[own]>bit){ return 0; } else if(d_aBoard[own]<bit){ break; } own++; ptn<<=1; } /** 90180/270 */ if(own>si-1){ return 2; } } //180 if(d_aBoard[si-1]==ENDBIT){ own=1; you=si-1-1; while(own<=si-1){ bit=1; ptn=TOPBIT; while((d_aBoard[you]!=ptn)&&(d_aBoard[own]>=bit)){ bit<<=1; ptn>>=1; } if(d_aBoard[own]>bit){ return 0; } else if(d_aBoard[own]<bit){ break; } own++; you--; } /** 90180 */ if(own>si-1){ return 4; } } //270 if(d_aBoard[BOUND1]==TOPBIT){ own=1; ptn=TOPBIT>>1; while(own<=si-1){ bit=1; you=0; while((d_aBoard[you]!=ptn)&&(d_aBoard[own]>=bit)){ bit<<=1; you++; } if(d_aBoard[own]>bit){ return 0; } else if(d_aBoard[own]<bit){ break; } own++; ptn>>=1; } } return 8; } // __global__ void cuda_kernel_b1( register int size, register int mark, unsigned int* totalDown, unsigned int* totalLeft, unsigned int* totalRight, unsigned int* d_results, unsigned int* d_uniq, register int totalCond, /**11 backTrack1aBoard*********************/ //unsigned int* t_aBoard, register int h_row, /**11 BOUND1*********************/ int B1 ) { register const unsigned int mask=(1<<size)-1; register unsigned int total=0; register unsigned int unique=0; register int row=0; register unsigned int bit; // // // //ID register unsigned const int tid=threadIdx.x; //ID register unsigned const int bid=blockIdx.x; //ID register unsigned const int idx=bid*blockDim.x+tid; // // // //shared //10mask //GPU10 //THREAD_NUM __shared__ unsigned int down[THREAD_NUM][10]; down[tid][row]=totalDown[idx]; __shared__ unsigned int left[THREAD_NUM][10]; left[tid][row]=totalLeft[idx]; __shared__ unsigned int right[THREAD_NUM][10]; right[tid][row]=totalRight[idx]; __shared__ unsigned int bitmap[THREAD_NUM][10]; //down,left,rightbitmap bitmap[tid][row] =mask&~( down[tid][row] |left[tid][row] |right[tid][row]); __shared__ unsigned int sum[THREAD_NUM]; /***11 backTrack1aBoard 
*********************/ //unsigned int c_aBoard[MAX]; __shared__ unsigned int usum[THREAD_NUM]; // //GPUstepstotalCond if(idx<totalCond){ //totalDown,totalLeft,totalRight //down,left,right //CPU t_steps // // idx // /***11 backTrack1aBoard*********************/ //for(int i=0;i<h_row;i++){ // c_aBoard[i]=t_aBoard[idx*h_row+i]; //1 //} register unsigned int bitmap_tid_row; register unsigned int down_tid_row; register unsigned int left_tid_row; register unsigned int right_tid_row; while(row>=0){ bitmap_tid_row=bitmap[tid][row]; down_tid_row=down[tid][row]; left_tid_row=left[tid][row]; right_tid_row=right[tid][row]; if(bitmap_tid_row==0){ row--; }else{ /**11 **********/ if(row+h_row<B1) { bitmap_tid_row=bitmap[tid][row]&=~2; // bm|=2; bm^=2; (bm&=~2) } // // bitmap[tid][row] /***11 backTrack1aBoard*********************/ //^=c_aBoard[row+h_row] //=bit ^=bit =(-bitmap_tid_row&bitmap_tid_row); if((bit&mask)!=0){ //? // if(row+1==mark){ /**11 backTradk1symmetryOps*********************/ //int s=symmetryOps(size,c_aBoard); //if(s!=0){ //print(size); //print()TOTAL++ //TOTAL // unique++; total+=8; // //} row--; }else{ int rowP=row+1; down[tid][rowP]=down_tid_row|bit; left[tid][rowP]=(left_tid_row|bit)<<1; right[tid][rowP]=(right_tid_row|bit)>>1; bitmap[tid][rowP] =mask&~( down[tid][rowP] |left[tid][rowP] |right[tid][rowP]); row++; } }else{ // row--; } } } //sum[tid] sum[tid]=total; usum[tid]=unique; }else{ //totalCondtotal sum[tid]=0; usum[tid]=0; } //__syncthreads() //__syncthreads() __syncthreads();if(tid<64&&tid+64<THREAD_NUM){ sum[tid]+=sum[tid+64]; usum[tid]+=usum[tid+64]; } __syncwarp();if(tid<32){ sum[tid]+=sum[tid+32]; usum[tid]+=usum[tid+32]; } __syncwarp();if(tid<16){ sum[tid]+=sum[tid+16]; usum[tid]+=usum[tid+16]; } __syncwarp();if(tid<8){ sum[tid]+=sum[tid+8]; usum[tid]+=usum[tid+8]; } __syncwarp();if(tid<4){ sum[tid]+=sum[tid+4]; usum[tid]+=usum[tid+4]; } __syncwarp();if(tid<2){ sum[tid]+=sum[tid+2]; usum[tid]+=usum[tid+2]; } __syncwarp();if(tid<1){ sum[tid]+=sum[tid+1]; usum[tid]+=usum[tid+1]; } __syncwarp();if(tid==0){ d_results[bid]=sum[0]; d_uniq[bid]=usum[0]; } } // // /***11 cuda_kernel_b2*********************/ __global__ void cuda_kernel_b2( register int size, register int mark, unsigned int* totalDown, unsigned int* totalLeft, unsigned int* totalRight, unsigned int* d_results, unsigned int* d_uniq, register int totalCond, unsigned int* t_aBoard, register int h_row, register int B1, register int B2, register int SM, register int LM, /***12 symmetryOps TOPBIT,ENDBIT*****/ register int TB, register int EB ) { register const unsigned int mask=(1<<size)-1; register unsigned int total=0; register unsigned int unique=0; register int row=0; register unsigned int bit; // // // //ID register unsigned const int tid=threadIdx.x; //ID register unsigned const int bid=blockIdx.x; //ID register unsigned const int idx=bid*blockDim.x+tid; // // // //shared //10mask //GPU10 //THREAD_NUM __shared__ unsigned int down[THREAD_NUM][10]; down[tid][row]=totalDown[idx]; __shared__ unsigned int left[THREAD_NUM][10]; left[tid][row]=totalLeft[idx]; __shared__ unsigned int right[THREAD_NUM][10]; right[tid][row]=totalRight[idx]; __shared__ unsigned int bitmap[THREAD_NUM][10]; //down,left,rightbitmap bitmap[tid][row] =mask&~( down[tid][row] |left[tid][row] |right[tid][row]); __shared__ unsigned int sum[THREAD_NUM]; unsigned int c_aBoard[MAX]; __shared__ unsigned int usum[THREAD_NUM]; // //GPUstepstotalCond if(idx<totalCond){ //totalDown,totalLeft,totalRight //down,left,right //CPU t_steps // // idx // for(int 
i=0;i<h_row;i++){ c_aBoard[i]=t_aBoard[idx*h_row+i]; //1 } register unsigned int bitmap_tid_row; register unsigned int down_tid_row; register unsigned int left_tid_row; register unsigned int right_tid_row; while(row>=0){ bitmap_tid_row=bitmap[tid][row]; down_tid_row=down[tid][row]; left_tid_row=left[tid][row]; right_tid_row=right[tid][row]; // //bitmap[tid][row]=00000000 //1 if(bitmap_tid_row==0){ row--; }else{ /**11 **********/ // if(row+h_row<B1){ //printf("BOUND1_row:%d:h_row:%d:row+hrow:%d:bit:%d\n",row,h_row,row+h_row,bitmap[tid][row]); bitmap_tid_row=bitmap[tid][row]&=~SM; // }else if(row+h_row==B2) { //printf("BOUND2_row:%d:h_row:%d:row+hrow:%d:bit:%d\n",row,h_row,row+h_row,bitmap[tid][row]); if((down_tid_row&SM)==0){ row--; continue; //printf("BOUND2_row\n"); } if((down_tid_row&SM)!=SM){ bitmap_tid_row=bitmap[tid][row]&=SM; //printf("BOUND2_SIDEMASK\n"); } } int save_bitmap=bitmap[tid][row]; // // bitmap[tid][row] ^=c_aBoard[row+h_row] =bit =(-bitmap_tid_row&bitmap_tid_row); if((bit&mask)!=0){ //? // if(row+1==mark){ /***11 LASTMASK*********************/ if((save_bitmap&LM)==0){ /***12 symmetryOps BOUND1,BOUND2,TOPBIT,ENDBIT*****/ int s=symmetryOps(size,c_aBoard,B1,B2,TB,EB); if(s!=0){ //print(size); //print()TOTAL++ //TOTAL // unique++; total+=s; // } row--; } }else{ int rowP=row+1; down[tid][rowP]=down_tid_row|bit; left[tid][rowP]=(left_tid_row|bit)<<1; right[tid][rowP]=(right_tid_row|bit)>>1; bitmap[tid][rowP] =mask&~( down[tid][rowP] |left[tid][rowP] |right[tid][rowP]); row++; } }else{ // row--; } } } //sum[tid] sum[tid]=total; usum[tid]=unique; }else{ //totalCondtotal sum[tid]=0; usum[tid]=0; } //__syncthreads() //__syncthreads() __syncthreads();if(tid<64&&tid+64<THREAD_NUM){ sum[tid]+=sum[tid+64]; usum[tid]+=usum[tid+64]; } __syncwarp();if(tid<32){ sum[tid]+=sum[tid+32]; usum[tid]+=usum[tid+32]; } __syncwarp();if(tid<16){ sum[tid]+=sum[tid+16]; usum[tid]+=usum[tid+16]; } __syncwarp();if(tid<8){ sum[tid]+=sum[tid+8]; usum[tid]+=usum[tid+8]; } __syncwarp();if(tid<4){ sum[tid]+=sum[tid+4]; usum[tid]+=usum[tid+4]; } __syncwarp();if(tid<2){ sum[tid]+=sum[tid+2]; usum[tid]+=usum[tid+2]; } __syncwarp();if(tid<1){ sum[tid]+=sum[tid+1]; usum[tid]+=usum[tid+1]; } __syncwarp();if(tid==0){ d_results[bid]=sum[0]; d_uniq[bid]=usum[0]; } } // long backTrack2G(int size,int mask,int row,int n_left,int n_down,int n_right,int steps,int BOUND1,int BOUND2,int SIDEMASK,int LASTMASK,int TOPBIT,int ENDBIT,unsigned int* aBoard) { //GPUGPU /***11 size<8mark2*********************/ unsigned int mark=size>12?size-10:3; //unsigned int mark=size>11?size-9:3; if(size<8){ mark=2; } const unsigned int h_mark=row; long total=0; int totalCond=0; bool matched=false; //host unsigned int down[32]; down[row]=n_down; unsigned int right[32]; right[row]=n_right; unsigned int left[32]; left[row]=n_left; //bitmap //stack1 unsigned int bitmap[32]; bitmap[row]=mask&~(left[row]|down[row]|right[row]); unsigned int bit; unsigned int* totalDown; hipHostMalloc((void**) &totalDown,sizeof(int)*steps); unsigned int* totalLeft; hipHostMalloc((void**) &totalLeft,sizeof(int)*steps); unsigned int* totalRight; hipHostMalloc((void**) &totalRight,sizeof(int)*steps); unsigned int* h_results; hipHostMalloc((void**) &h_results,sizeof(int)*steps); unsigned int* h_uniq; hipHostMalloc((void**) &h_uniq,sizeof(int)*steps); unsigned int* t_aBoard; hipHostMalloc((void**) &t_aBoard,sizeof(int)*steps*mark); //device unsigned int* downCuda; hipMalloc((void**) &downCuda,sizeof(int)*steps); unsigned int* leftCuda; hipMalloc((void**) 
&leftCuda,sizeof(int)*steps); unsigned int* rightCuda; hipMalloc((void**) &rightCuda,sizeof(int)*steps); unsigned int* resultsCuda; hipMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM); unsigned int* d_uniq; hipMalloc((void**) &d_uniq,sizeof(int)*steps/THREAD_NUM); unsigned int* d_aBoard; hipMalloc((void**) &d_aBoard,sizeof(int)*steps*mark); //123CPU->row==mark 3 //down,left,right totalDown,totalLeft,totalRight // //->3GPU //13CPU //n15row=5CPU //GPU(GPU10 //) register int rowP=0; while(row>=h_mark) { //bitmap[row]=00000000 //1 //06GPU if(bitmap[row]==0){ row--; } else{// /***11 *********************/ // if(row<BOUND1){ bitmap[row]&=~SIDEMASK; // }else if(row==BOUND2) { if((down[row]&SIDEMASK)==0){ row--; } if((down[row]&SIDEMASK)!=SIDEMASK){ bitmap[row]&=SIDEMASK; } } //06SGPU bitmap[row]^=aBoard[row]=bit=(-bitmap[row]&bitmap[row]); if((bit&mask)!=0){// rowP=row+1; down[rowP]=down[row]|bit; left[rowP]=(left[row]|bit)<<1; right[rowP]=(right[row]|bit)>>1; bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]); row++; if(row==mark){ //3(mark) //down,left,right // //GPU //totalCond threadId down,left,right //row=2(13n15row=5) //totalDown,totalLeft,totalRight totalDown[totalCond]=down[row]; totalLeft[totalCond]=left[row]; totalRight[totalCond]=right[row]; for(int i=0;i<mark;i++){ t_aBoard[totalCond*mark+i]=aBoard[i]; } // totalCond++; //GPUGPUstepsGPU // //ntotalCondstepsn // //totalCond==steps if(totalCond==steps){ //matched=trueCOUNT //GPUGPU //matched=true if(matched){ hipMemcpy(h_results,resultsCuda, sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); hipMemcpy(h_uniq,d_uniq, sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); for(int col=0;col<steps/THREAD_NUM;col++){ total+=h_results[col]; UNIQUE+=h_uniq[col]; } matched=false; } hipMemcpy(downCuda,totalDown, sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(leftCuda,totalLeft, sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(rightCuda,totalRight, sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(d_aBoard,t_aBoard, sizeof(int)*totalCond*mark,hipMemcpyHostToDevice); /***12 TOPBIT,ENDBIT*********************/ //cuda_kernel_b2<<<steps/THREAD_NUM,THREAD_NUM // >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,row,BOUND1,BOUND2,SIDEMASK,LASTMASK); hipLaunchKernelGGL(( cuda_kernel_b2), dim3(steps/THREAD_NUM),dim3(THREAD_NUM) , 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,row,BOUND1,BOUND2,SIDEMASK,LASTMASK,TOPBIT,ENDBIT); //steps //totalCond //GPUGPUmatched=true matched=true; //totalCond==stepsGPU0 //(stepsGPU) totalCond=0; } //totalDown,totalLeft,totalRight1 // row=2 //totalDown,totalLeft,totalRight row--; } }else{ //row==markCPU //nqueen row--; } } } //matched=trueCOUNT //GPUGPU //matched=true if(matched){ hipMemcpy(h_results,resultsCuda, sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); hipMemcpy(h_uniq,d_uniq, sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); for(int col=0;col<steps/THREAD_NUM;col++){ total+=h_results[col]; UNIQUE+=h_uniq[col]; } matched=false; } hipMemcpy(downCuda,totalDown, sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(leftCuda,totalLeft, sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(rightCuda,totalRight, sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(d_aBoard,t_aBoard, sizeof(int)*totalCond*mark,hipMemcpyHostToDevice); //size-mark GPU totalCond //steps //totalCond /***12 TOPBIT,ENDBIT*********************/ //cuda_kernel_b2<<<steps/THREAD_NUM,THREAD_NUM // 
>>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,mark,BOUND1,BOUND2,SIDEMASK,LASTMASK); hipLaunchKernelGGL(( cuda_kernel_b2), dim3(steps/THREAD_NUM),dim3(THREAD_NUM) , 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,mark,BOUND1,BOUND2,SIDEMASK,LASTMASK,TOPBIT,ENDBIT); hipMemcpy(h_results,resultsCuda, sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); hipMemcpy(h_uniq,d_uniq, sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); for(int col=0;col<steps/THREAD_NUM;col++){ total+=h_results[col]; UNIQUE+=h_uniq[col]; } // hipFree(downCuda); hipFree(leftCuda); hipFree(rightCuda); hipFree(resultsCuda); hipFree(d_uniq); hipFree(d_aBoard); hipHostFree(totalDown); hipHostFree(totalLeft); hipHostFree(totalRight); hipHostFree(h_results); hipHostFree(h_uniq); hipHostFree(t_aBoard); return total; } // long backTrack1G(int size,int mask,int row,int n_left,int n_down,int n_right,int steps,int BOUND1) { //GPUGPU /***08 mark3*********************/ const unsigned int mark=size>12?size-10:3; const unsigned int h_mark=row; long total=0; int totalCond=0; bool matched=false; //host unsigned int down[32]; down[row]=n_down; unsigned int right[32]; right[row]=n_right; unsigned int left[32]; left[row]=n_left; //bitmap //stack1 unsigned int bitmap[32]; bitmap[row]=mask&~(left[row]|down[row]|right[row]); unsigned int bit; unsigned int* totalDown; hipHostMalloc((void**) &totalDown,sizeof(int)*steps); unsigned int* totalLeft; hipHostMalloc((void**) &totalLeft,sizeof(int)*steps); unsigned int* totalRight; hipHostMalloc((void**) &totalRight,sizeof(int)*steps); unsigned int* h_results; hipHostMalloc((void**) &h_results,sizeof(int)*steps); unsigned int* h_uniq; hipHostMalloc((void**) &h_uniq,sizeof(int)*steps); /***11 backTrack1aBoard*********************/ //unsigned int* t_aBoard; //hipHostMalloc((void**) &t_aBoard,sizeof(int)*steps*mark); //device unsigned int* downCuda; hipMalloc((void**) &downCuda,sizeof(int)*steps); unsigned int* leftCuda; hipMalloc((void**) &leftCuda,sizeof(int)*steps); unsigned int* rightCuda; hipMalloc((void**) &rightCuda,sizeof(int)*steps); unsigned int* resultsCuda; hipMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM); unsigned int* d_uniq; hipMalloc((void**) &d_uniq,sizeof(int)*steps/THREAD_NUM); /***11 backTrack1aBoard*********************/ //unsigned int* d_aBoard; //hipMalloc((void**) &d_aBoard,sizeof(int)*steps*mark); //123CPU->row==mark 3 //down,left,right totalDown,totalLeft,totalRight // //->3GPU //13CPU //n15row=5CPU //GPU(GPU10 //) //while(row>=0) { register int rowP=0; while(row>=h_mark) { //bitmap[row]=00000000 //1 //06GPU if(bitmap[row]==0){ row--; } else{// /***11 *********************/ if(row<BOUND1) { bitmap[row]&=~2; // bm|=2; bm^=2; (bm&=~2) } //06SGPU /***11 aBoard*********************/ //bitmap[row]^=aBoard[row]=bit=(-bitmap[row]&bitmap[row]); bitmap[row]^=bit=(-bitmap[row]&bitmap[row]); if((bit&mask)!=0){// rowP=row+1; down[rowP]=down[row]|bit; left[rowP]=(left[row]|bit)<<1; right[rowP]=(right[row]|bit)>>1; bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]); row++; if(row==mark){ //3(mark) //down,left,right // //GPU //totalCond threadId down,left,right //row=2(13n15row=5) //totalDown,totalLeft,totalRight totalDown[totalCond]=down[row]; totalLeft[totalCond]=left[row]; totalRight[totalCond]=right[row]; /***11 aBoard*********************/ //for(int i=0;i<mark;i++){ // t_aBoard[totalCond*mark+i]=aBoard[i]; //} // totalCond++; //GPUGPUstepsGPU // //ntotalCondstepsn // //totalCond==steps 
if(totalCond==steps){ //matched=trueCOUNT //GPUGPU //matched=true if(matched){ hipMemcpy(h_results,resultsCuda, sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); hipMemcpy(h_uniq,d_uniq, sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); for(int col=0;col<steps/THREAD_NUM;col++){ total+=h_results[col]; UNIQUE+=h_uniq[col]; } matched=false; } hipMemcpy(downCuda,totalDown, sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(leftCuda,totalLeft, sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(rightCuda,totalRight, sizeof(int)*totalCond,hipMemcpyHostToDevice); /***11 aBoard*********************/ //hipMemcpy(d_aBoard,t_aBoard, // sizeof(int)*totalCond*mark,hipMemcpyHostToDevice); /***11 BOUND1*********************/ //cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM // >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,row); hipLaunchKernelGGL(( cuda_kernel_b1), dim3(steps/THREAD_NUM),dim3(THREAD_NUM) , 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,row,BOUND1); //steps //totalCond //GPUGPUmatched=true matched=true; //totalCond==stepsGPU0 //(stepsGPU) totalCond=0; } //totalDown,totalLeft,totalRight1 // row=2 //totalDown,totalLeft,totalRight row--; } }else{ //row==markCPU //nqueen row--; } } } //matched=trueCOUNT //GPUGPU //matched=true if(matched){ hipMemcpy(h_results,resultsCuda, sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); hipMemcpy(h_uniq,d_uniq, sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); for(int col=0;col<steps/THREAD_NUM;col++){ total+=h_results[col]; UNIQUE+=h_uniq[col]; } matched=false; } hipMemcpy(downCuda,totalDown, sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(leftCuda,totalLeft, sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(rightCuda,totalRight, sizeof(int)*totalCond,hipMemcpyHostToDevice); /***11 aBoard*********************/ //hipMemcpy(d_aBoard,t_aBoard, // sizeof(int)*totalCond*mark,hipMemcpyHostToDevice); //size-mark GPU totalCond //steps //totalCond /***11 BOUND1*********************/ //cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM // >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,mark); hipLaunchKernelGGL(( cuda_kernel_b1), dim3(steps/THREAD_NUM),dim3(THREAD_NUM) , 0, 0, size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,mark,BOUND1); hipMemcpy(h_results,resultsCuda, sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); hipMemcpy(h_uniq,d_uniq, sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); for(int col=0;col<steps/THREAD_NUM;col++){ total+=h_results[col]; UNIQUE+=h_uniq[col]; } // hipFree(downCuda); hipFree(leftCuda); hipFree(rightCuda); hipFree(resultsCuda); hipFree(d_uniq); /***11 aBoard**/ //hipFree(d_aBoard); hipHostFree(totalDown); hipHostFree(totalLeft); hipHostFree(totalRight); hipHostFree(h_results); hipHostFree(h_uniq); /***11 aBoard**/ //hipHostFree(t_aBoard); return total; } // //GPU void NQueenG(register int size,register int steps) { if(size<=0||size>32){return;} /** register int unsigned total: TOTAL sizeE:size-1 */ unsigned int total=0; unsigned int sizeE=size-1; register unsigned int aBoard[MAX]; register int bit=0; register int mask=((1<<size)-1); int col=0;//1 0 aBoard[0]=bit=(1<<col); register int left=bit<<1,down=bit,right=bit>>1; /** 232 */ for(register int BOUND1=2;BOUND1<sizeE;BOUND1++){ aBoard[1]=bit=(1<<BOUND1); total+=backTrack1G(size,mask,2, (left|bit)<<1,(down|bit),(right|bit)>>1, steps,BOUND1); } register int LASTMASK,SIDEMASK; register int TOPBIT=1<<(sizeE); 
SIDEMASK=LASTMASK=(TOPBIT|1); register int ENDBIT=(TOPBIT>>1); /** 12 1/2 n=8 1,2,3 1/2+1 n=9 1,2,3,4 */ for(register int BOUND1=1,BOUND2=sizeE-1;BOUND1<BOUND2;BOUND1++,BOUND2--){ aBoard[0]=bit=(1<<BOUND1); total+=backTrack2G(size,mask,1, bit<<1,bit,bit>>1, steps,BOUND1,BOUND2,SIDEMASK,LASTMASK,TOPBIT,ENDBIT,aBoard); LASTMASK|=LASTMASK>>1|LASTMASK<<1; ENDBIT>>=1; } /** */ TOTAL=total; } /** CUDA **/ bool InitCUDA() { int count; hipGetDeviceCount(&count); if(count==0){fprintf(stderr,"There is no device.\n");return false;} int i; for(i=0;i<count;i++){ hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop,i)==hipSuccess){if(prop.major>=1){break;} } } if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;} hipSetDevice(i); return true; } // void symmetryOps(local *l) { int own,ptn,you,bit; //90 if(l->aBoard[l->BOUND2]==1){ own=1; ptn=2; while(own<=G.sizeE){ bit=1; you=G.sizeE; while((l->aBoard[you]!=ptn)&&(l->aBoard[own]>=bit)){ bit<<=1; you--; } if(l->aBoard[own]>bit){ return; } if(l->aBoard[own]<bit){ break; } own++; ptn<<=1; } /** 90180/270 */ if(own>G.sizeE){ l->COUNT2[l->BOUND1]++; return; } } //180 if(l->aBoard[G.sizeE]==l->ENDBIT){ own=1; you=G.sizeE-1; while(own<=G.sizeE){ bit=1; ptn=l->TOPBIT; while((l->aBoard[you]!=ptn)&&(l->aBoard[own]>=bit)){ bit<<=1; ptn>>=1; } if(l->aBoard[own]>bit){ return; } if(l->aBoard[own]<bit){ break; } own++; you--; } /** 90180 */ if(own>G.sizeE){ l->COUNT4[l->BOUND1]++; return; } } //270 if(l->aBoard[l->BOUND1]==l->TOPBIT){ own=1; ptn=l->TOPBIT>>1; while(own<=G.sizeE){ bit=1; you=0; while((l->aBoard[you]!=ptn)&&(l->aBoard[own]>=bit)){ bit<<=1; you++; } if(l->aBoard[own]>bit){ return; } if(l->aBoard[own]<bit){ break; } own++; ptn>>=1; } } l->COUNT8[l->BOUND1]++; } // //CPU backTrack2// void backTrack2_NR(int row,int h_left,int h_down,int h_right,local *l) { unsigned int left[G.size]; unsigned int down[G.size]; unsigned int right[G.size]; unsigned int bitmap[G.size]; left[row]=h_left; down[row]=h_down; right[row]=h_right; bitmap[row]=l->mask&~(left[row]|down[row]|right[row]); unsigned int bit; int mark=row; // while(row>=mark){//row=1 row>=1, row=2 row>=2 if(bitmap[row]==0){ --row; }else{ // if(row<l->BOUND1){ bitmap[row]&=~l->SIDEMASK; // }else if(row==l->BOUND2) { if((down[row]&l->SIDEMASK)==0){ row--; } if((down[row]&l->SIDEMASK)!=l->SIDEMASK){ bitmap[row]&=l->SIDEMASK; } } int save_bitmap=bitmap[row]; bitmap[row]^=l->aBoard[row]=bit=(-bitmap[row]&bitmap[row]); if((bit&l->mask)!=0){ if(row==G.sizeE){ if((save_bitmap&l->LASTMASK)==0){ symmetryOps(l); --row; } }else{ int n=row++; left[row]=(left[n]|bit)<<1; down[row]=down[n]|bit; right[row]=(right[n]|bit)>>1; bitmap[row]=l->mask&~(left[row]|down[row]|right[row]); } }else{ --row; } } } } // // CPU backTrack2 void backTrack2D_NR(int row,int left,int down,int right,local *l) { int bitmap,bit; int b[100], *p=b; int odd=G.size&1; //:1 :0 for(int i=0;i<(1+odd);++i){ bitmap=0; if(0==i){ int half=G.size>>1; // size/2 bitmap=(1<<half)-1; }else{ bitmap=1<<(G.size>>1); // down[1]=bitmap; // right[1]=(bitmap>>1); // left[1]=(bitmap<<1); // pnStack=aStack+1; // *pnStack++=0; } mais1:bitmap=l->mask&~(left|down|right); // if(row==G.sizeE){ if(bitmap){ // if((bitmap&l->LASTMASK)==0){ l->aBoard[row]=bitmap; symmetryOps(l); } } }else{ // if(row<l->BOUND1){ bitmap&=~l->SIDEMASK; // }else if(row==l->BOUND2){ if(!(down&l->SIDEMASK)) goto volta; if((down&l->SIDEMASK)!=l->SIDEMASK) bitmap&=l->SIDEMASK; } if(bitmap){ outro:bitmap^=l->aBoard[row]=bit=-bitmap&bitmap; if(bitmap){ *p++=left; *p++=down; 
*p++=right; } *p++=bitmap; row++; left=(left|bit)<<1; down=down|bit; right=(right|bit)>>1; goto mais1; //Backtrack2(y+1, (left | bit)<<1, down | bit, (right | bit)>>1); volta:if(p<=b) return; row--; bitmap=*--p; if(bitmap){ right=*--p; down=*--p; left=*--p; goto outro; }else{ goto volta; } } } goto volta; } } //CPU backTrack void backTrack1_NR(int row,int h_left,int h_down,int h_right,local *l) { unsigned int left[G.size]; unsigned int down[G.size]; unsigned int right[G.size]; unsigned int bitmap[G.size]; left[row]=h_left; down[row]=h_down; right[row]=h_right; bitmap[row]=l->mask&~(left[row]|down[row]|right[row]); unsigned int bit; int mark=row; // while(row>=mark){//row=1 row>=1, row=2 row>=2 if(bitmap[row]==0){ --row; }else{ if(row<l->BOUND1) { bitmap[row]&=~2; // bm|=2; bm^=2; (bm&=~2) } bitmap[row]^=l->aBoard[row]=bit=(-bitmap[row]&bitmap[row]); if((bit&l->mask)!=0){ if(row==G.sizeE){ l->COUNT8[l->BOUND1]++; --row; }else{ int n=row++; left[row]=(left[n]|bit)<<1; down[row]=down[n]|bit; right[row]=(right[n]|bit)>>1; bitmap[row]=l->mask&~(left[row]|down[row]|right[row]); } }else{ --row; } } } } // CPU backTrack void backTrack1D_NR(int row,int left,int down,int right,local *l) { int bitmap,bit; int b[100], *p=b; int odd=G.size&1; //:1 :0 for(int i=0;i<(1+odd);++i){ bitmap=0; if(0==i){ int half=G.size>>1; // size/2 bitmap=(1<<half)-1; }else{ bitmap=1<<(G.size>>1); // down[1]=bitmap; // right[1]=(bitmap>>1); // left[1]=(bitmap<<1); // pnStack=aStack+1; // *pnStack++=0; } b1mais1:bitmap=l->mask&~(left|down|right); // if(row==G.sizeE){ if(bitmap){ // l->aBoard[row]=bitmap; l->COUNT8[l->BOUND1]++; } }else{ // // if(row<l->BOUND1) { bitmap&=~2; // bm|=2; bm^=2; (bm&=~2) } if(bitmap){ b1outro:bitmap^=l->aBoard[row]=bit=-bitmap&bitmap; if(bitmap){ *p++=left; *p++=down; *p++=right; } *p++=bitmap; row++; left=(left|bit)<<1; down=down|bit; right=(right|bit)>>1; goto b1mais1; //Backtrack1(y+1, (left | bit)<<1, down | bit, (right | bit)>>1); b1volta:if(p<=b) return; row--; bitmap=*--p; if(bitmap){ right=*--p; down=*--p; left=*--p; goto b1outro; }else{ goto b1volta; } } } goto b1volta; } } // //CPU backTrack void backTrack2(int row,int left,int down,int right,local *l) { int bitmap=0; int bit=0; bitmap=(l->mask&~(left|down|right)); if(row==G.sizeE){ if(bitmap){ // if((bitmap&l->LASTMASK)==0){ l->aBoard[row]=(-bitmap&bitmap); symmetryOps(l); } } }else{ // if(row<l->BOUND1){ bitmap&=~l->SIDEMASK; // }else if(row==l->BOUND2) { if((down&l->SIDEMASK)==0){ return; } if((down&l->SIDEMASK)!=l->SIDEMASK){ bitmap&=l->SIDEMASK; } } while(bitmap){ bitmap^=l->aBoard[row]=bit=(-bitmap&bitmap); backTrack2(row+1,(left|bit)<<1, down|bit,(right|bit)>>1,l); } } } // CPU backTrack void backTrack2D(int row,int left,int down,int right,local *l) { int bit; int bitmap=l->mask&~(left|down|right); if(row==G.sizeE){ // if(bitmap){ if((bitmap&l->LASTMASK)==0){ // l->aBoard[row]=bitmap; symmetryOps(l); } } }else{ if(row<l->BOUND1){ // bitmap&=~l->SIDEMASK; }else if(row==l->BOUND2) { // if((down&l->SIDEMASK)==0){ return; } if((down&l->SIDEMASK)!=l->SIDEMASK){ bitmap&=l->SIDEMASK; } } while(bitmap){ bitmap^=l->aBoard[row]=bit=(-bitmap&bitmap); backTrack2D(row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l); } } } // //CPU backTrack void backTrack1(int row,int left,int down,int right,local *l) { int bitmap=0; int bit=0; bitmap=(l->mask&~(left|down|right)); if(row==G.sizeE){ if(bitmap){ l->COUNT8[l->BOUND1]++; } }else{ if(row<l->BOUND1) { bitmap&=~2; // bm|=2; bm^=2; (bm&=~2) } while(bitmap){ bitmap^=l->aBoard[row]=bit=(-bitmap&bitmap); 
backTrack1(row+1,(left|bit)<<1, down|bit,(right|bit)>>1,l); } } } // CPU backTrack void backTrack1D(int row,int left,int down,int right,local *l) { int bit; int bitmap=l->mask&~(left|down|right); // if(row==G.sizeE) { if(bitmap){ /* l->aBoard[row]=bitmap; */ l->COUNT8[l->BOUND1]++; } }else{ // // if(row<l->BOUND1) { bitmap&=~2; // bm|=2; bm^=2; (bm&=~2) } while(bitmap){ bitmap^=l->aBoard[row]=bit=(-bitmap&bitmap); backTrack1D(row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l); } } } // void *run(void *args) { /** // typedef struct { int size; int sizeE; long lTOTAL,lUNIQUE; }GCLASS, *GClass; GCLASS G; */ local *l=(local *)args; /** */ int bit=0; int col=0; if(l->BOUND1>1 && l->BOUND1<G.sizeE) { l->aBoard[0]=bit=(1<<col); int left=bit<<1;int down=bit;int right=bit>>1; if(l->BOUND1<G.sizeE) { col=l->BOUND1;// l->aBoard[1]=bit=(1<<col); if(NR==1){// backTrack1_NR(2,(left|bit)<<1,(down|bit),(right|bit)>>1,l);//GPU //backTrack1D_NR(2,(left|bit)<<1,(down|bit),(right|bit)>>1,l); }else{// backTrack1(2,(left|bit)<<1,(down|bit),(right|bit)>>1,l);//GPU //backTrack1D(2,(left|bit)<<1,(down|bit),(right|bit)>>1,l);// } } } l->TOPBIT=1<<(G.sizeE); l->ENDBIT=(l->TOPBIT>>l->BOUND1); l->SIDEMASK=l->LASTMASK=(l->TOPBIT|1); /** */ if(l->BOUND1>0&&l->BOUND2<G.sizeE&&l->BOUND1<l->BOUND2){ for(int i=1; i<l->BOUND1; i++){ l->LASTMASK=l->LASTMASK|l->LASTMASK>>1|l->LASTMASK<<1; } if(l->BOUND1<l->BOUND2){ int col=l->BOUND1; l->aBoard[0]=bit=(1<<col); if(NR==1){// backTrack2_NR(1,bit<<1,bit,bit>>1,l); //GPU //backTrack2D_NR(1,bit<<1,bit,bit>>1,l);// }else{// backTrack2(1,bit<<1,bit,bit>>1,l); //GPU //backTrack2D(1,bit<<1,bit,bit>>1,l);// } } l->ENDBIT>>=G.size; } return 0;//*run()return 0; } //pthread void *NQueenThread() { /** // typedef struct{ int BOUND1,BOUND2,TOPBIT,ENDBIT,SIDEMASK,LASTMASK; int mask; int aBoard[MAX]; long COUNT2[MAX],COUNT4[MAX],COUNT8[MAX]; }local ; */ local l[MAX];// local /** pthread */ pthread_t pt[G.size]; /** */ for(int BOUND1=G.sizeE,BOUND2=0;BOUND2<G.sizeE;BOUND1--,BOUND2++){ /** aBoard */ l[BOUND1].mask=(1<<G.size)-1; l[BOUND1].BOUND1=BOUND1;l[BOUND1].BOUND2=BOUND2;//B1 B2 for(int j=0;j<G.size;j++){ l[l->BOUND1].aBoard[j]=j; }// aB[] l[BOUND1].COUNT2[BOUND1]=l[BOUND1].COUNT4[BOUND1]= l[BOUND1].COUNT8[BOUND1]=0;// /** pthread_create BOUND1N run() */ int iFbRet=pthread_create(&pt[BOUND1],NULL,&run,&l[BOUND1]); if(iFbRet>0){ printf("[mainThread] pthread_create #%d: %d\n", l[BOUND1].BOUND1, iFbRet); } } /** join() */ for(int BOUND1=G.sizeE,BOUND2=0;BOUND2<G.sizeE;BOUND1--,BOUND2++){ pthread_join(pt[BOUND1],NULL); } // for(int BOUND1=G.sizeE,BOUND2=0;BOUND2<G.sizeE;BOUND1--,BOUND2++){ G.lTOTAL+=l[BOUND1].COUNT2[BOUND1]*2+ l[BOUND1].COUNT4[BOUND1]*4+l[BOUND1].COUNT8[BOUND1]*8; G.lUNIQUE+=l[BOUND1].COUNT2[BOUND1]+ l[BOUND1].COUNT4[BOUND1]+l[BOUND1].COUNT8[BOUND1]; } return 0; } // CPU/CPUR(pthread) void NQueen() { /** CUDA pthread C C13_N-Queen.c pthread //iFbRet = pthread_create(&pth, NULL,&NQueenThread,NULL); */ pthread_t pth; // int iFbRet; //pthread //iFbRet = pthread_create(&pth, NULL,&NQueenThread,NULL); // if(iFbRet>0){ printf("[main] pthread_create: %d\n", iFbRet); // } pthread_join(pth,NULL); /* join */ } // int main(int argc,char** argv) { /** $ nvcc -O3 CUDA13_N-Queen.cu && ./a.out (-c|-r|-g|-s) -c:cpu -r cpu -g GPU -s SGPU() */ bool cpu=false,cpur=false,gpu=false,sgpu=false; int argstart=1; if(argc>=2&&argv[1][0]=='-'){ if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;} else if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else 
if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;} else if(argv[1][1]=='s'||argv[1][1]=='S'){sgpu=true;} else{ gpu=true; } //gpu argstart=2; } if(argc<argstart){ printf("Usage: %s [-c|-g|-r|-s] n steps\n",argv[0]); printf(" -r: CPUR only\n"); printf(" -c: CPU only\n"); printf(" -g: GPU only\n"); printf(" -s: SGPU only\n"); printf("Default to 8 queen\n"); } /** $ nvcc pthread cpu/cpurc # //iFbRet = pthread_create(&pth, NULL,&NQueenThread,NULL); # .cu .c CUDA13_N-Queen.cu -> CUDA13_N-Queen.c //iFbRet = pthread_create(&pth, NULL,&NQueenThread,NULL); $ gcc -Wall -W -O3 -g -ftrapv -std=c99 -pthread CUDA13_N-Queen.c && ./a.out [-c|-r] */ if(cpu){ printf("\n\nCPU \n"); printf("pthread\nnvccpthread\n"); }else if(cpur){ printf("\n\nCPUR \n"); printf("pthread\nnvccpthread\n"); }else if(gpu){ printf("\n\nGPU CUDA\n"); }else if(sgpu){ printf("\n\nSGPU CUDA\n"); } /** CPUCPUCPURCPU */ if(cpu||cpur) { int min=4; int targetN=17;//NN /** */ struct timeval t0; struct timeval t1; printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); for(int i=min;i<=targetN;i++){ /** size/sizeE/lTOTAL/lUNIQUE typedef struct { int size; int sizeE; long lTOTAL,lUNIQUE; }GCLASS, *GClass; GCLASS G; */ G.size=i;G.sizeE=i-1;//size sizeE G.lTOTAL=G.lUNIQUE=0;//TOTAL UNIQUE // gettimeofday(&t0, NULL);// /** CPU/CPUR non-recursive/recursive 0 1 */ if(cpur){ // //NR=0;NQueenD(); NR=0;NQueen(); } if(cpu){ // //NR=1;NQueenD(); NR=1;NQueen(); } // gettimeofday(&t1, NULL);// /** Total Unique dd:hh:mm:ss.ms 15: 2279184 285053 00:00:00:00.33 16: 14772512 1846955 00:00:00:01.59 17: 95815104 11977939 00:00:00:10.92 */ int ss;int ms;int dd; if(t1.tv_usec<t0.tv_usec) { dd=(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; }else { dd=(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if int hh=ss/3600; int mm=(ss-hh*3600)/60; ss%=60; /** */ printf("%2d:%16ld%17ld%12.2d:%02d:%02d:%02d.%02d\n", i,G.lTOTAL,G.lUNIQUE,dd,hh,mm,ss,ms); } //end for }//end if /** GPUGPUSGPU GPURGPU */ if(gpu||sgpu) { /** CUDA */ if(!InitCUDA()){return 0;} int steps=24576; int min=4;int targetN=21;//NN /** */ struct timeval t0; struct timeval t1; /** */ printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); /** */ for(int i=min;i<=targetN;i++){ gettimeofday(&t0,NULL); // if(gpu){// TOTAL=0; UNIQUE=0; NQueenG(i,steps); } gettimeofday(&t1,NULL); // /** Total Unique dd:hh:mm:ss.ms 15: 2279184 285053 00:00:00:00.33 16: 14772512 1846955 00:00:00:01.59 17: 95815104 11977939 00:00:00:10.92 */ int ss;int ms;int dd; if (t1.tv_usec<t0.tv_usec) { dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; } else { dd=(int)(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if int hh=ss/3600; int mm=(ss-hh*3600)/60; ss%=60; printf("%2d:%13ld%16ld%4.2d:%02d:%02d:%02d.%02d\n", i,TOTAL,UNIQUE,dd,hh,mm,ss,ms); }//end for }//end if return 0; }
db6c0dc4ef9d9ed03996113ef4c441411cb037f7.cu
/** CUDAで学ぶアルゴリズムとデータ構造 ステップバイステップでN−クイーン問題を最適化 一般社団法人 共同通信社 情報技術局 鈴木 維一郎([email protected]) コンパイルと実行 $ nvcc -O3 CUDA**_N-Queen.cu && ./a.out (-c|-r|-g) -c:cpu -r cpu再帰 -g GPU $ nvcc -O3 CUDA13_N-Queen.cu && ./a.out -g 13.GPU 非再帰 並列処理 CUDA N: Total Unique dd:hh:mm:ss.ms 4: 2 1 00:00:00:00.37 5: 10 2 00:00:00:00.00 6: 4 1 00:00:00:00.00 7: 40 6 00:00:00:00.00 8: 92 12 00:00:00:00.01 9: 352 46 00:00:00:00.01 10: 724 92 00:00:00:00.01 11: 2680 341 00:00:00:00.01 12: 14200 1787 00:00:00:00.02 13: 73712 9233 00:00:00:00.03 14: 365596 45752 00:00:00:00.03 15: 2279184 285053 00:00:00:00.04 16: 14772512 1846955 00:00:00:00.08 17: 95815104 11977939 00:00:00:00.35 18: 666090624 83263591 00:00:00:02.60 19: 4968057848 621012754 00:00:00:22.23 20: 39029188884 4878666808 00:00:03:26.80 21: 314666222712 39333324973 00:00:33:09.52 */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <time.h> #include <sys/time.h> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <pthread.h> #define THREAD_NUM 96 #define MAX 27 //変数宣言 /** CPU/CPURの場合において、 バックトラックをnon-recursive/recursiveのいずれかを選択 再帰:0 非再帰:1 */ int NR; /** グローバル変数として合計解とユニーク解を格納する変数 */ long TOTAL=0; long UNIQUE=0; // //変数宣言 // pthreadはパラメータを1つしか渡せないので構造体に格納 //グローバル構造体 typedef struct { int size; int sizeE; long lTOTAL,lUNIQUE; }GCLASS, *GClass; GCLASS G; // //ローカル構造体 typedef struct{ int BOUND1,BOUND2,TOPBIT,ENDBIT,SIDEMASK,LASTMASK; int mask; int aBoard[MAX]; long COUNT2[MAX],COUNT4[MAX],COUNT8[MAX]; }local ; // __device__ __host__ int symmetryOps(int si,unsigned int *d_aBoard,int BOUND1,int BOUND2,int TOPBIT,int ENDBIT) { int own,ptn,you,bit; //90度回転 if(d_aBoard[BOUND2]==1){ own=1; ptn=2; while(own<=si-1){ bit=1; you=si-1; while((d_aBoard[you]!=ptn)&&(d_aBoard[own]>=bit)){ bit<<=1; you--; } if(d_aBoard[own]>bit){ return 0; } else if(d_aBoard[own]<bit){ break; } own++; ptn<<=1; } /** 90度回転して同型なら180度/270度回転も同型である */ if(own>si-1){ return 2; } } //180度回転 if(d_aBoard[si-1]==ENDBIT){ own=1; you=si-1-1; while(own<=si-1){ bit=1; ptn=TOPBIT; while((d_aBoard[you]!=ptn)&&(d_aBoard[own]>=bit)){ bit<<=1; ptn>>=1; } if(d_aBoard[own]>bit){ return 0; } else if(d_aBoard[own]<bit){ break; } own++; you--; } /** 90度回転が同型でなくても180度回転が同型である事もある */ if(own>si-1){ return 4; } } //270度回転 if(d_aBoard[BOUND1]==TOPBIT){ own=1; ptn=TOPBIT>>1; while(own<=si-1){ bit=1; you=0; while((d_aBoard[you]!=ptn)&&(d_aBoard[own]>=bit)){ bit<<=1; you++; } if(d_aBoard[own]>bit){ return 0; } else if(d_aBoard[own]<bit){ break; } own++; ptn>>=1; } } return 8; } // __global__ void cuda_kernel_b1( register int size, register int mark, unsigned int* totalDown, unsigned int* totalLeft, unsigned int* totalRight, unsigned int* d_results, unsigned int* d_uniq, register int totalCond, /**11 backTrack1ではaBoard不要のためコメント*********************/ //unsigned int* t_aBoard, register int h_row, /**11 BOUND1追加*********************/ int B1 ) { register const unsigned int mask=(1<<size)-1; register unsigned int total=0; register unsigned int unique=0; register int row=0; register unsigned int bit; // //スレッド // //ブロック内のスレッドID register unsigned const int tid=threadIdx.x; //グリッド内のブロックID register unsigned const int bid=blockIdx.x; //全体通してのID register unsigned const int idx=bid*blockDim.x+tid; // //シェアードメモリ // //sharedメモリを使う ブロック内スレッドで共有 //10固定なのは現在のmask設定で //GPUで実行するのは最大10だから //THREAD_NUMはブロックあたりのスレッド数 __shared__ unsigned int down[THREAD_NUM][10]; down[tid][row]=totalDown[idx]; __shared__ unsigned int left[THREAD_NUM][10]; left[tid][row]=totalLeft[idx]; __shared__ unsigned 
int right[THREAD_NUM][10]; right[tid][row]=totalRight[idx]; __shared__ unsigned int bitmap[THREAD_NUM][10]; //down,left,rightからbitmapを出す bitmap[tid][row] =mask&~( down[tid][row] |left[tid][row] |right[tid][row]); __shared__ unsigned int sum[THREAD_NUM]; /***11 backTrack1ではaBoard不要 *********************/ //unsigned int c_aBoard[MAX]; __shared__ unsigned int usum[THREAD_NUM]; //余分なスレッドは動かさない //GPUはsteps数起動するがtotalCond以上は空回しする if(idx<totalCond){ //totalDown,totalLeft,totalRightの情報を //down,left,rightに詰め直す //CPU で詰め込んだ t_はsteps個あるが //ブロック内ではブロックあたりのスレッド数に限定 //されるので idxでよい // /***11 backTrack1ではaBoard不要*********************/ //for(int i=0;i<h_row;i++){ // c_aBoard[i]=t_aBoard[idx*h_row+i]; //2次元配列だが1次元的に利用 //} register unsigned int bitmap_tid_row; register unsigned int down_tid_row; register unsigned int left_tid_row; register unsigned int right_tid_row; while(row>=0){ bitmap_tid_row=bitmap[tid][row]; down_tid_row=down[tid][row]; left_tid_row=left[tid][row]; right_tid_row=right[tid][row]; if(bitmap_tid_row==0){ row--; }else{ /**11 枝刈り**********/ if(row+h_row<B1) { bitmap_tid_row=bitmap[tid][row]&=~2; // bm|=2; bm^=2; (bm&=~2と同等) } //クイーンを置く //置く場所があるかどうか bitmap[tid][row] /***11 backTrack1ではaBoard不要のためコメント*********************/ //^=c_aBoard[row+h_row] //=bit ^=bit =(-bitmap_tid_row&bitmap_tid_row); if((bit&mask)!=0){ //最終行?最終行から1個前の行まで //無事到達したら 加算する if(row+1==mark){ /**11 backTradk1ではsymmetryOps不要のためコメント*********************/ //int s=symmetryOps(size,c_aBoard); //if(s!=0){ //print(size); //print()でTOTALを++しない //ホストに戻す配列にTOTALを入れる //スレッドが1つの場合は配列は1個 unique++; total+=8; //対称解除で得られた解数を加算 //} row--; }else{ int rowP=row+1; down[tid][rowP]=down_tid_row|bit; left[tid][rowP]=(left_tid_row|bit)<<1; right[tid][rowP]=(right_tid_row|bit)>>1; bitmap[tid][rowP] =mask&~( down[tid][rowP] |left[tid][rowP] |right[tid][rowP]); row++; } }else{ //置く場所がなければ1個上に row--; } } } //最後sum[tid]に加算する sum[tid]=total; usum[tid]=unique; }else{ //totalCond未満は空回しするのでtotalは加算しない sum[tid]=0; usum[tid]=0; } //__syncthreads()でブロック内のスレッド間の同期 //全てのスレッドが__syncthreads()に辿り着くのを待つ __syncthreads();if(tid<64&&tid+64<THREAD_NUM){ sum[tid]+=sum[tid+64]; usum[tid]+=usum[tid+64]; } __syncwarp();if(tid<32){ sum[tid]+=sum[tid+32]; usum[tid]+=usum[tid+32]; } __syncwarp();if(tid<16){ sum[tid]+=sum[tid+16]; usum[tid]+=usum[tid+16]; } __syncwarp();if(tid<8){ sum[tid]+=sum[tid+8]; usum[tid]+=usum[tid+8]; } __syncwarp();if(tid<4){ sum[tid]+=sum[tid+4]; usum[tid]+=usum[tid+4]; } __syncwarp();if(tid<2){ sum[tid]+=sum[tid+2]; usum[tid]+=usum[tid+2]; } __syncwarp();if(tid<1){ sum[tid]+=sum[tid+1]; usum[tid]+=usum[tid+1]; } __syncwarp();if(tid==0){ d_results[bid]=sum[0]; d_uniq[bid]=usum[0]; } } // // /***11 cuda_kernel_b2新設*********************/ __global__ void cuda_kernel_b2( register int size, register int mark, unsigned int* totalDown, unsigned int* totalLeft, unsigned int* totalRight, unsigned int* d_results, unsigned int* d_uniq, register int totalCond, unsigned int* t_aBoard, register int h_row, register int B1, register int B2, register int SM, register int LM, /***12 symmetryOps 省力化のためTOPBIT,ENDBITを渡す*****/ register int TB, register int EB ) { register const unsigned int mask=(1<<size)-1; register unsigned int total=0; register unsigned int unique=0; register int row=0; register unsigned int bit; // //スレッド // //ブロック内のスレッドID register unsigned const int tid=threadIdx.x; //グリッド内のブロックID register unsigned const int bid=blockIdx.x; //全体通してのID register unsigned const int idx=bid*blockDim.x+tid; // //シェアードメモリ // //sharedメモリを使う ブロック内スレッドで共有 
//10固定なのは現在のmask設定で //GPUで実行するのは最大10だから //THREAD_NUMはブロックあたりのスレッド数 __shared__ unsigned int down[THREAD_NUM][10]; down[tid][row]=totalDown[idx]; __shared__ unsigned int left[THREAD_NUM][10]; left[tid][row]=totalLeft[idx]; __shared__ unsigned int right[THREAD_NUM][10]; right[tid][row]=totalRight[idx]; __shared__ unsigned int bitmap[THREAD_NUM][10]; //down,left,rightからbitmapを出す bitmap[tid][row] =mask&~( down[tid][row] |left[tid][row] |right[tid][row]); __shared__ unsigned int sum[THREAD_NUM]; unsigned int c_aBoard[MAX]; __shared__ unsigned int usum[THREAD_NUM]; //余分なスレッドは動かさない //GPUはsteps数起動するがtotalCond以上は空回しする if(idx<totalCond){ //totalDown,totalLeft,totalRightの情報を //down,left,rightに詰め直す //CPU で詰め込んだ t_はsteps個あるが //ブロック内ではブロックあたりのスレッド数に限定 //されるので idxでよい // for(int i=0;i<h_row;i++){ c_aBoard[i]=t_aBoard[idx*h_row+i]; //2次元配列だが1次元的に利用 } register unsigned int bitmap_tid_row; register unsigned int down_tid_row; register unsigned int left_tid_row; register unsigned int right_tid_row; while(row>=0){ bitmap_tid_row=bitmap[tid][row]; down_tid_row=down[tid][row]; left_tid_row=left[tid][row]; right_tid_row=right[tid][row]; // //bitmap[tid][row]=00000000 クイーンを //どこにも置けないので1行上に戻る if(bitmap_tid_row==0){ row--; }else{ /**11 枝刈り追加**********/ //【枝刈り】上部サイド枝刈り if(row+h_row<B1){ //printf("BOUND1_row:%d:h_row:%d:row+hrow:%d:bit:%d\n",row,h_row,row+h_row,bitmap[tid][row]); bitmap_tid_row=bitmap[tid][row]&=~SM; //【枝刈り】下部サイド枝刈り }else if(row+h_row==B2) { //printf("BOUND2_row:%d:h_row:%d:row+hrow:%d:bit:%d\n",row,h_row,row+h_row,bitmap[tid][row]); if((down_tid_row&SM)==0){ row--; continue; //printf("BOUND2_row\n"); } if((down_tid_row&SM)!=SM){ bitmap_tid_row=bitmap[tid][row]&=SM; //printf("BOUND2_SIDEMASK\n"); } } int save_bitmap=bitmap[tid][row]; //クイーンを置く //置く場所があるかどうか bitmap[tid][row] ^=c_aBoard[row+h_row] =bit =(-bitmap_tid_row&bitmap_tid_row); if((bit&mask)!=0){ //最終行?最終行から1個前の行まで //無事到達したら 加算する if(row+1==mark){ /***11 LASTMASK枝刈り*********************/ if((save_bitmap&LM)==0){ /***12 symmetryOps 省力化のためBOUND1,BOUND2,TOPBIT,ENDBITを渡す*****/ int s=symmetryOps(size,c_aBoard,B1,B2,TB,EB); if(s!=0){ //print(size); //print()でTOTALを++しない //ホストに戻す配列にTOTALを入れる //スレッドが1つの場合は配列は1個 unique++; total+=s; //対称解除で得られた解数を加算 } row--; } }else{ int rowP=row+1; down[tid][rowP]=down_tid_row|bit; left[tid][rowP]=(left_tid_row|bit)<<1; right[tid][rowP]=(right_tid_row|bit)>>1; bitmap[tid][rowP] =mask&~( down[tid][rowP] |left[tid][rowP] |right[tid][rowP]); row++; } }else{ //置く場所がなければ1個上に row--; } } } //最後sum[tid]に加算する sum[tid]=total; usum[tid]=unique; }else{ //totalCond未満は空回しするのでtotalは加算しない sum[tid]=0; usum[tid]=0; } //__syncthreads()でブロック内のスレッド間の同期 //全てのスレッドが__syncthreads()に辿り着くのを待つ __syncthreads();if(tid<64&&tid+64<THREAD_NUM){ sum[tid]+=sum[tid+64]; usum[tid]+=usum[tid+64]; } __syncwarp();if(tid<32){ sum[tid]+=sum[tid+32]; usum[tid]+=usum[tid+32]; } __syncwarp();if(tid<16){ sum[tid]+=sum[tid+16]; usum[tid]+=usum[tid+16]; } __syncwarp();if(tid<8){ sum[tid]+=sum[tid+8]; usum[tid]+=usum[tid+8]; } __syncwarp();if(tid<4){ sum[tid]+=sum[tid+4]; usum[tid]+=usum[tid+4]; } __syncwarp();if(tid<2){ sum[tid]+=sum[tid+2]; usum[tid]+=usum[tid+2]; } __syncwarp();if(tid<1){ sum[tid]+=sum[tid+1]; usum[tid]+=usum[tid+1]; } __syncwarp();if(tid==0){ d_results[bid]=sum[0]; d_uniq[bid]=usum[0]; } } // long backTrack2G(int size,int mask,int row,int n_left,int n_down,int n_right,int steps,int BOUND1,int BOUND2,int SIDEMASK,int LASTMASK,int TOPBIT,int ENDBIT,unsigned int* aBoard) { //何行目からGPUで行くか。ここの設定は変更可能、設定値を多くするほどGPUで並行して動く /***11 
size<8の時はmarkが2*********************/ unsigned int mark=size>12?size-10:3; //unsigned int mark=size>11?size-9:3; if(size<8){ mark=2; } const unsigned int h_mark=row; long total=0; int totalCond=0; bool matched=false; //host unsigned int down[32]; down[row]=n_down; unsigned int right[32]; right[row]=n_right; unsigned int left[32]; left[row]=n_left; //bitmapを配列で持つことにより //stackを使わないで1行前に戻れる unsigned int bitmap[32]; bitmap[row]=mask&~(left[row]|down[row]|right[row]); unsigned int bit; unsigned int* totalDown; cudaMallocHost((void**) &totalDown,sizeof(int)*steps); unsigned int* totalLeft; cudaMallocHost((void**) &totalLeft,sizeof(int)*steps); unsigned int* totalRight; cudaMallocHost((void**) &totalRight,sizeof(int)*steps); unsigned int* h_results; cudaMallocHost((void**) &h_results,sizeof(int)*steps); unsigned int* h_uniq; cudaMallocHost((void**) &h_uniq,sizeof(int)*steps); unsigned int* t_aBoard; cudaMallocHost((void**) &t_aBoard,sizeof(int)*steps*mark); //device unsigned int* downCuda; cudaMalloc((void**) &downCuda,sizeof(int)*steps); unsigned int* leftCuda; cudaMalloc((void**) &leftCuda,sizeof(int)*steps); unsigned int* rightCuda; cudaMalloc((void**) &rightCuda,sizeof(int)*steps); unsigned int* resultsCuda; cudaMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM); unsigned int* d_uniq; cudaMalloc((void**) &d_uniq,sizeof(int)*steps/THREAD_NUM); unsigned int* d_aBoard; cudaMalloc((void**) &d_aBoard,sizeof(int)*steps*mark); //12行目までは3行目までCPU->row==mark以下で 3行目までの //down,left,right情報を totalDown,totalLeft,totalRight //に格納 //する->3行目以降をGPUマルチスレッドで実行し結果を取得 //13行目以降はCPUで実行する行数が1個ずつ増えて行く //例えばn15だとrow=5までCPUで実行し、 //それ以降はGPU(現在の設定だとGPUでは最大10行実行する //ようになっている) register int rowP=0; while(row>=h_mark) { //bitmap[row]=00000000 クイーンを //どこにも置けないので1行上に戻る //06GPU こっちのほうが優秀 if(bitmap[row]==0){ row--; } else{//おける場所があれば進む /***11 枝刈り追加*********************/ //【枝刈り】上部サイド枝刈り if(row<BOUND1){ bitmap[row]&=~SIDEMASK; //【枝刈り】下部サイド枝刈り }else if(row==BOUND2) { if((down[row]&SIDEMASK)==0){ row--; } if((down[row]&SIDEMASK)!=SIDEMASK){ bitmap[row]&=SIDEMASK; } } //06SGPU bitmap[row]^=aBoard[row]=bit=(-bitmap[row]&bitmap[row]); if((bit&mask)!=0){//置く場所があれば先に進む rowP=row+1; down[rowP]=down[row]|bit; left[rowP]=(left[row]|bit)<<1; right[rowP]=(right[row]|bit)>>1; bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]); row++; if(row==mark){ //3行目(mark)にクイーンを1個ずつ置いていって、 //down,left,right情報を格納、 //その次の行へは進まない。その行で可能な場所にクイー //ン置き終わったらGPU並列実行 //totalCond がthreadIdになる 各スレッドに down,left,right情報を渡す //row=2(13行目以降は増えていく。例えばn15だとrow=5)の情報を //totalDown,totalLeft,totalRightに格納する totalDown[totalCond]=down[row]; totalLeft[totalCond]=left[row]; totalRight[totalCond]=right[row]; for(int i=0;i<mark;i++){ t_aBoard[totalCond*mark+i]=aBoard[i]; } //スレッド数をインクリメントする totalCond++; //最大GPU数に達してしまったら一旦ここでGPUを実行する。stepsはGPUの同 //時並行稼働数を制御 //nの数が少ないうちはtotalCondがstepsを超えることはないがnの数が増え //て行くと超えるようになる。 //ここではtotalCond==stepsの場合だけこの中へ if(totalCond==steps){ //matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUか //ら出たらmatched=trueになってる if(matched){ cudaMemcpy(h_results,resultsCuda, sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); cudaMemcpy(h_uniq,d_uniq, sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); for(int col=0;col<steps/THREAD_NUM;col++){ total+=h_results[col]; UNIQUE+=h_uniq[col]; } matched=false; } cudaMemcpy(downCuda,totalDown, sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(leftCuda,totalLeft, sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(rightCuda,totalRight, sizeof(int)*totalCond,cudaMemcpyHostToDevice); 
cudaMemcpy(d_aBoard,t_aBoard, sizeof(int)*totalCond*mark,cudaMemcpyHostToDevice); /***12 TOPBIT,ENDBIT追加*********************/ //cuda_kernel_b2<<<steps/THREAD_NUM,THREAD_NUM // >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,row,BOUND1,BOUND2,SIDEMASK,LASTMASK); cuda_kernel_b2<<<steps/THREAD_NUM,THREAD_NUM >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,row,BOUND1,BOUND2,SIDEMASK,LASTMASK,TOPBIT,ENDBIT); //steps数の数だけマルチスレッドで起動するのだが、実際に計算が行われ //るのはtotalCondの数だけでそれ以外は空回しになる //GPU内でカウントしているので、GPUから出たらmatched=trueになってる matched=true; //totalCond==stepsルートでGPUを実行したらスレッドをまた0から開始す //る(これによりなんどもsteps数分だけGPUを起動できる) totalCond=0; } //totalDown,totalLeft,totalRightに情報を格納したら1行上に上がる //これを繰り返すことにより row=2で可能な場所全てにクイーンを置いて //totalDown,totalLeft,totalRightに情報を格納する row--; } }else{ //置く場所がなければ上に上がる。row==mark行に達するまではCPU側で普通に //nqueenをやる row--; } } } //matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUから出たら //matched=trueになってる if(matched){ cudaMemcpy(h_results,resultsCuda, sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); cudaMemcpy(h_uniq,d_uniq, sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); for(int col=0;col<steps/THREAD_NUM;col++){ total+=h_results[col]; UNIQUE+=h_uniq[col]; } matched=false; } cudaMemcpy(downCuda,totalDown, sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(leftCuda,totalLeft, sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(rightCuda,totalRight, sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(d_aBoard,t_aBoard, sizeof(int)*totalCond*mark,cudaMemcpyHostToDevice); //size-mark は何行GPUを実行するか totalCondはスレッド数 //steps数の数だけマルチスレッドで起動するのだが、実際に計算が行われるのは //totalCondの数だけでそれ以外は空回しになる /***12 TOPBIT,ENDBIT追加*********************/ //cuda_kernel_b2<<<steps/THREAD_NUM,THREAD_NUM // >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,mark,BOUND1,BOUND2,SIDEMASK,LASTMASK); cuda_kernel_b2<<<steps/THREAD_NUM,THREAD_NUM >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,mark,BOUND1,BOUND2,SIDEMASK,LASTMASK,TOPBIT,ENDBIT); cudaMemcpy(h_results,resultsCuda, sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); cudaMemcpy(h_uniq,d_uniq, sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); for(int col=0;col<steps/THREAD_NUM;col++){ total+=h_results[col]; UNIQUE+=h_uniq[col]; } // cudaFree(downCuda); cudaFree(leftCuda); cudaFree(rightCuda); cudaFree(resultsCuda); cudaFree(d_uniq); cudaFree(d_aBoard); cudaFreeHost(totalDown); cudaFreeHost(totalLeft); cudaFreeHost(totalRight); cudaFreeHost(h_results); cudaFreeHost(h_uniq); cudaFreeHost(t_aBoard); return total; } // long backTrack1G(int size,int mask,int row,int n_left,int n_down,int n_right,int steps,int BOUND1) { //何行目からGPUで行くか。ここの設定は変更可能、設定値を多くするほどGPUで並行して動く /***08 クイーンを2行目まで固定で置くためmarkが3以上必要*********************/ const unsigned int mark=size>12?size-10:3; const unsigned int h_mark=row; long total=0; int totalCond=0; bool matched=false; //host unsigned int down[32]; down[row]=n_down; unsigned int right[32]; right[row]=n_right; unsigned int left[32]; left[row]=n_left; //bitmapを配列で持つことにより //stackを使わないで1行前に戻れる unsigned int bitmap[32]; bitmap[row]=mask&~(left[row]|down[row]|right[row]); unsigned int bit; unsigned int* totalDown; cudaMallocHost((void**) &totalDown,sizeof(int)*steps); unsigned int* totalLeft; cudaMallocHost((void**) &totalLeft,sizeof(int)*steps); unsigned int* totalRight; cudaMallocHost((void**) &totalRight,sizeof(int)*steps); unsigned int* h_results; cudaMallocHost((void**) 
&h_results,sizeof(int)*steps); unsigned int* h_uniq; cudaMallocHost((void**) &h_uniq,sizeof(int)*steps); /***11 backTrack1ではaBoard不要のためコメント*********************/ //unsigned int* t_aBoard; //cudaMallocHost((void**) &t_aBoard,sizeof(int)*steps*mark); //device unsigned int* downCuda; cudaMalloc((void**) &downCuda,sizeof(int)*steps); unsigned int* leftCuda; cudaMalloc((void**) &leftCuda,sizeof(int)*steps); unsigned int* rightCuda; cudaMalloc((void**) &rightCuda,sizeof(int)*steps); unsigned int* resultsCuda; cudaMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM); unsigned int* d_uniq; cudaMalloc((void**) &d_uniq,sizeof(int)*steps/THREAD_NUM); /***11 backTrack1ではaBoard不要のためコメント*********************/ //unsigned int* d_aBoard; //cudaMalloc((void**) &d_aBoard,sizeof(int)*steps*mark); //12行目までは3行目までCPU->row==mark以下で 3行目までの //down,left,right情報を totalDown,totalLeft,totalRight //に格納 //する->3行目以降をGPUマルチスレッドで実行し結果を取得 //13行目以降はCPUで実行する行数が1個ずつ増えて行く //例えばn15だとrow=5までCPUで実行し、 //それ以降はGPU(現在の設定だとGPUでは最大10行実行する //ようになっている) //while(row>=0) { register int rowP=0; while(row>=h_mark) { //bitmap[row]=00000000 クイーンを //どこにも置けないので1行上に戻る //06GPU こっちのほうが優秀 if(bitmap[row]==0){ row--; } else{//おける場所があれば進む /***11 枝刈り*********************/ if(row<BOUND1) { bitmap[row]&=~2; // bm|=2; bm^=2; (bm&=~2と同等) } //06SGPU /***11 aBoard不要*********************/ //bitmap[row]^=aBoard[row]=bit=(-bitmap[row]&bitmap[row]); bitmap[row]^=bit=(-bitmap[row]&bitmap[row]); if((bit&mask)!=0){//置く場所があれば先に進む rowP=row+1; down[rowP]=down[row]|bit; left[rowP]=(left[row]|bit)<<1; right[rowP]=(right[row]|bit)>>1; bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]); row++; if(row==mark){ //3行目(mark)にクイーンを1個ずつ置いていって、 //down,left,right情報を格納、 //その次の行へは進まない。その行で可能な場所にクイー //ン置き終わったらGPU並列実行 //totalCond がthreadIdになる 各スレッドに down,left,right情報を渡す //row=2(13行目以降は増えていく。例えばn15だとrow=5)の情報を //totalDown,totalLeft,totalRightに格納する totalDown[totalCond]=down[row]; totalLeft[totalCond]=left[row]; totalRight[totalCond]=right[row]; /***11 aBoardコメント*********************/ //for(int i=0;i<mark;i++){ // t_aBoard[totalCond*mark+i]=aBoard[i]; //} //スレッド数をインクリメントする totalCond++; //最大GPU数に達してしまったら一旦ここでGPUを実行する。stepsはGPUの同 //時並行稼働数を制御 //nの数が少ないうちはtotalCondがstepsを超えることはないがnの数が増え //て行くと超えるようになる。 //ここではtotalCond==stepsの場合だけこの中へ if(totalCond==steps){ //matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUか //ら出たらmatched=trueになってる if(matched){ cudaMemcpy(h_results,resultsCuda, sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); cudaMemcpy(h_uniq,d_uniq, sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); for(int col=0;col<steps/THREAD_NUM;col++){ total+=h_results[col]; UNIQUE+=h_uniq[col]; } matched=false; } cudaMemcpy(downCuda,totalDown, sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(leftCuda,totalLeft, sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(rightCuda,totalRight, sizeof(int)*totalCond,cudaMemcpyHostToDevice); /***11 aBoard不要のためコメント*********************/ //cudaMemcpy(d_aBoard,t_aBoard, // sizeof(int)*totalCond*mark,cudaMemcpyHostToDevice); /***11 BOUND1追加*********************/ //cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM // >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,row); cuda_kernel_b1<<<steps/THREAD_NUM,THREAD_NUM >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,row,BOUND1); //steps数の数だけマルチスレッドで起動するのだが、実際に計算が行われ //るのはtotalCondの数だけでそれ以外は空回しになる //GPU内でカウントしているので、GPUから出たらmatched=trueになってる matched=true; //totalCond==stepsルートでGPUを実行したらスレッドをまた0から開始す //る(これによりなんどもsteps数分だけGPUを起動できる) totalCond=0; 
} //totalDown,totalLeft,totalRightに情報を格納したら1行上に上がる //これを繰り返すことにより row=2で可能な場所全てにクイーンを置いて //totalDown,totalLeft,totalRightに情報を格納する row--; } }else{ //置く場所がなければ上に上がる。row==mark行に達するまではCPU側で普通に //nqueenをやる row--; } } } //matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUから出たら //matched=trueになってる if(matched){ cudaMemcpy(h_results,resultsCuda, sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); cudaMemcpy(h_uniq,d_uniq, sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); for(int col=0;col<steps/THREAD_NUM;col++){ total+=h_results[col]; UNIQUE+=h_uniq[col]; } matched=false; } cudaMemcpy(downCuda,totalDown, sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(leftCuda,totalLeft, sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(rightCuda,totalRight, sizeof(int)*totalCond,cudaMemcpyHostToDevice); /***11 aBoard不要のためコメント*********************/ //cudaMemcpy(d_aBoard,t_aBoard, // sizeof(int)*totalCond*mark,cudaMemcpyHostToDevice); //size-mark は何行GPUを実行するか totalCondはスレッド数 //steps数の数だけマルチスレッドで起動するのだが、実際に計算が行われるのは //totalCondの数だけでそれ以外は空回しになる /***11 BOUND1追加*********************/ //cuda_kernel<<<steps/THREAD_NUM,THREAD_NUM // >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,d_aBoard,mark); cuda_kernel_b1<<<steps/THREAD_NUM,THREAD_NUM >>>(size,size-mark,downCuda,leftCuda,rightCuda,resultsCuda,d_uniq,totalCond,mark,BOUND1); cudaMemcpy(h_results,resultsCuda, sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); cudaMemcpy(h_uniq,d_uniq, sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); for(int col=0;col<steps/THREAD_NUM;col++){ total+=h_results[col]; UNIQUE+=h_uniq[col]; } // cudaFree(downCuda); cudaFree(leftCuda); cudaFree(rightCuda); cudaFree(resultsCuda); cudaFree(d_uniq); /***11 aBoardコメント**/ //cudaFree(d_aBoard); cudaFreeHost(totalDown); cudaFreeHost(totalLeft); cudaFreeHost(totalRight); cudaFreeHost(h_results); cudaFreeHost(h_uniq); /***11 aBoardコメント**/ //cudaFreeHost(t_aBoard); return total; } // //GPU void NQueenG(register int size,register int steps) { if(size<=0||size>32){return;} /** パラメータは渡す変数はregisterとする int型は unsigned とする total: グローバル変数TOTALへのアクセスを極小化する sizeE:size-1といった計算を変数に格納しフラット化する */ unsigned int total=0; unsigned int sizeE=size-1; register unsigned int aBoard[MAX]; register int bit=0; register int mask=((1<<size)-1); int col=0;//1行め右端 0 aBoard[0]=bit=(1<<col); register int left=bit<<1,down=bit,right=bit>>1; /** 2行目は右から3列目から左端から2列目まで */ for(register int BOUND1=2;BOUND1<sizeE;BOUND1++){ aBoard[1]=bit=(1<<BOUND1); total+=backTrack1G(size,mask,2, (left|bit)<<1,(down|bit),(right|bit)>>1, steps,BOUND1); } register int LASTMASK,SIDEMASK; register int TOPBIT=1<<(sizeE); SIDEMASK=LASTMASK=(TOPBIT|1); register int ENDBIT=(TOPBIT>>1); /** 1行目右から2列目から 偶数個は1/2 n=8 なら 1,2,3 奇数個は1/2+1 n=9 なら 1,2,3,4 */ for(register int BOUND1=1,BOUND2=sizeE-1;BOUND1<BOUND2;BOUND1++,BOUND2--){ aBoard[0]=bit=(1<<BOUND1); total+=backTrack2G(size,mask,1, bit<<1,bit,bit>>1, steps,BOUND1,BOUND2,SIDEMASK,LASTMASK,TOPBIT,ENDBIT,aBoard); LASTMASK|=LASTMASK>>1|LASTMASK<<1; ENDBIT>>=1; } /** グローバル変数へのアクセスを極小化する */ TOTAL=total; } /** CUDA 初期化 **/ bool InitCUDA() { int count; cudaGetDeviceCount(&count); if(count==0){fprintf(stderr,"There is no device.\n");return false;} int i; for(i=0;i<count;i++){ cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){if(prop.major>=1){break;} } } if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;} cudaSetDevice(i); return true; } // void symmetryOps(local *l) { int own,ptn,you,bit; //90度回転 
if(l->aBoard[l->BOUND2]==1){ own=1; ptn=2; while(own<=G.sizeE){ bit=1; you=G.sizeE; while((l->aBoard[you]!=ptn)&&(l->aBoard[own]>=bit)){ bit<<=1; you--; } if(l->aBoard[own]>bit){ return; } if(l->aBoard[own]<bit){ break; } own++; ptn<<=1; } /** 90度回転して同型なら180度/270度回転も同型である */ if(own>G.sizeE){ l->COUNT2[l->BOUND1]++; return; } } //180度回転 if(l->aBoard[G.sizeE]==l->ENDBIT){ own=1; you=G.sizeE-1; while(own<=G.sizeE){ bit=1; ptn=l->TOPBIT; while((l->aBoard[you]!=ptn)&&(l->aBoard[own]>=bit)){ bit<<=1; ptn>>=1; } if(l->aBoard[own]>bit){ return; } if(l->aBoard[own]<bit){ break; } own++; you--; } /** 90度回転が同型でなくても180度回転が同型である事もある */ if(own>G.sizeE){ l->COUNT4[l->BOUND1]++; return; } } //270度回転 if(l->aBoard[l->BOUND1]==l->TOPBIT){ own=1; ptn=l->TOPBIT>>1; while(own<=G.sizeE){ bit=1; you=0; while((l->aBoard[you]!=ptn)&&(l->aBoard[own]>=bit)){ bit<<=1; you++; } if(l->aBoard[own]>bit){ return; } if(l->aBoard[own]<bit){ break; } own++; ptn>>=1; } } l->COUNT8[l->BOUND1]++; } // //CPU 非再帰版 backTrack2//新しく記述 void backTrack2_NR(int row,int h_left,int h_down,int h_right,local *l) { unsigned int left[G.size]; unsigned int down[G.size]; unsigned int right[G.size]; unsigned int bitmap[G.size]; left[row]=h_left; down[row]=h_down; right[row]=h_right; bitmap[row]=l->mask&~(left[row]|down[row]|right[row]); unsigned int bit; int mark=row; //固定していれた行より上はいかない while(row>=mark){//row=1 row>=1, row=2 row>=2 if(bitmap[row]==0){ --row; }else{ //【枝刈り】上部サイド枝刈り if(row<l->BOUND1){ bitmap[row]&=~l->SIDEMASK; //【枝刈り】下部サイド枝刈り }else if(row==l->BOUND2) { if((down[row]&l->SIDEMASK)==0){ row--; } if((down[row]&l->SIDEMASK)!=l->SIDEMASK){ bitmap[row]&=l->SIDEMASK; } } int save_bitmap=bitmap[row]; bitmap[row]^=l->aBoard[row]=bit=(-bitmap[row]&bitmap[row]); if((bit&l->mask)!=0){ if(row==G.sizeE){ if((save_bitmap&l->LASTMASK)==0){ symmetryOps(l); --row; } }else{ int n=row++; left[row]=(left[n]|bit)<<1; down[row]=down[n]|bit; right[row]=(right[n]|bit)>>1; bitmap[row]=l->mask&~(left[row]|down[row]|right[row]); } }else{ --row; } } } } // //通常版 CPU 非再帰版 backTrack2 void backTrack2D_NR(int row,int left,int down,int right,local *l) { int bitmap,bit; int b[100], *p=b; int odd=G.size&1; //奇数:1 偶数:0 for(int i=0;i<(1+odd);++i){ bitmap=0; if(0==i){ int half=G.size>>1; // size/2 bitmap=(1<<half)-1; }else{ bitmap=1<<(G.size>>1); // down[1]=bitmap; // right[1]=(bitmap>>1); // left[1]=(bitmap<<1); // pnStack=aStack+1; // *pnStack++=0; } mais1:bitmap=l->mask&~(left|down|right); // 【枝刈り】 if(row==G.sizeE){ if(bitmap){ //【枝刈り】 最下段枝刈り if((bitmap&l->LASTMASK)==0){ l->aBoard[row]=bitmap; symmetryOps(l); } } }else{ //【枝刈り】上部サイド枝刈り if(row<l->BOUND1){ bitmap&=~l->SIDEMASK; //【枝刈り】下部サイド枝刈り }else if(row==l->BOUND2){ if(!(down&l->SIDEMASK)) goto volta; if((down&l->SIDEMASK)!=l->SIDEMASK) bitmap&=l->SIDEMASK; } if(bitmap){ outro:bitmap^=l->aBoard[row]=bit=-bitmap&bitmap; if(bitmap){ *p++=left; *p++=down; *p++=right; } *p++=bitmap; row++; left=(left|bit)<<1; down=down|bit; right=(right|bit)>>1; goto mais1; //Backtrack2(y+1, (left | bit)<<1, down | bit, (right | bit)>>1); volta:if(p<=b) return; row--; bitmap=*--p; if(bitmap){ right=*--p; down=*--p; left=*--p; goto outro; }else{ goto volta; } } } goto volta; } } //CPU 非再帰版 backTrack void backTrack1_NR(int row,int h_left,int h_down,int h_right,local *l) { unsigned int left[G.size]; unsigned int down[G.size]; unsigned int right[G.size]; unsigned int bitmap[G.size]; left[row]=h_left; down[row]=h_down; right[row]=h_right; bitmap[row]=l->mask&~(left[row]|down[row]|right[row]); unsigned int bit; int mark=row; 
//固定していれた行より上はいかない while(row>=mark){//row=1 row>=1, row=2 row>=2 if(bitmap[row]==0){ --row; }else{ if(row<l->BOUND1) { bitmap[row]&=~2; // bm|=2; bm^=2; (bm&=~2と同等) } bitmap[row]^=l->aBoard[row]=bit=(-bitmap[row]&bitmap[row]); if((bit&l->mask)!=0){ if(row==G.sizeE){ l->COUNT8[l->BOUND1]++; --row; }else{ int n=row++; left[row]=(left[n]|bit)<<1; down[row]=down[n]|bit; right[row]=(right[n]|bit)>>1; bitmap[row]=l->mask&~(left[row]|down[row]|right[row]); } }else{ --row; } } } } //通常版 CPU 非再帰版 backTrack void backTrack1D_NR(int row,int left,int down,int right,local *l) { int bitmap,bit; int b[100], *p=b; int odd=G.size&1; //奇数:1 偶数:0 for(int i=0;i<(1+odd);++i){ bitmap=0; if(0==i){ int half=G.size>>1; // size/2 bitmap=(1<<half)-1; }else{ bitmap=1<<(G.size>>1); // down[1]=bitmap; // right[1]=(bitmap>>1); // left[1]=(bitmap<<1); // pnStack=aStack+1; // *pnStack++=0; } b1mais1:bitmap=l->mask&~(left|down|right); //【枝刈り】1行目角にクイーンがある場合回転対称チェックを省略 if(row==G.sizeE){ if(bitmap){ // l->aBoard[row]=bitmap; l->COUNT8[l->BOUND1]++; } }else{ //【枝刈り】鏡像についても主対角線鏡像のみを判定すればよい // 2行目、2列目を数値とみなし、2行目<2列目という条件を課せばよい if(row<l->BOUND1) { bitmap&=~2; // bm|=2; bm^=2; (bm&=~2と同等) } if(bitmap){ b1outro:bitmap^=l->aBoard[row]=bit=-bitmap&bitmap; if(bitmap){ *p++=left; *p++=down; *p++=right; } *p++=bitmap; row++; left=(left|bit)<<1; down=down|bit; right=(right|bit)>>1; goto b1mais1; //Backtrack1(y+1, (left | bit)<<1, down | bit, (right | bit)>>1); b1volta:if(p<=b) return; row--; bitmap=*--p; if(bitmap){ right=*--p; down=*--p; left=*--p; goto b1outro; }else{ goto b1volta; } } } goto b1volta; } } // //CPU 再帰版 backTrack void backTrack2(int row,int left,int down,int right,local *l) { int bitmap=0; int bit=0; bitmap=(l->mask&~(left|down|right)); if(row==G.sizeE){ if(bitmap){ //【枝刈り】 最下段枝刈り if((bitmap&l->LASTMASK)==0){ l->aBoard[row]=(-bitmap&bitmap); symmetryOps(l); } } }else{ //【枝刈り】上部サイド枝刈り if(row<l->BOUND1){ bitmap&=~l->SIDEMASK; //【枝刈り】下部サイド枝刈り }else if(row==l->BOUND2) { if((down&l->SIDEMASK)==0){ return; } if((down&l->SIDEMASK)!=l->SIDEMASK){ bitmap&=l->SIDEMASK; } } while(bitmap){ bitmap^=l->aBoard[row]=bit=(-bitmap&bitmap); backTrack2(row+1,(left|bit)<<1, down|bit,(right|bit)>>1,l); } } } //通常版 CPU 再帰版 backTrack void backTrack2D(int row,int left,int down,int right,local *l) { int bit; int bitmap=l->mask&~(left|down|right); if(row==G.sizeE){ // 【枝刈り】 if(bitmap){ if((bitmap&l->LASTMASK)==0){ //【枝刈り】 最下段枝刈り l->aBoard[row]=bitmap; symmetryOps(l); } } }else{ if(row<l->BOUND1){ //【枝刈り】上部サイド枝刈り bitmap&=~l->SIDEMASK; }else if(row==l->BOUND2) { //【枝刈り】下部サイド枝刈り if((down&l->SIDEMASK)==0){ return; } if((down&l->SIDEMASK)!=l->SIDEMASK){ bitmap&=l->SIDEMASK; } } while(bitmap){ bitmap^=l->aBoard[row]=bit=(-bitmap&bitmap); backTrack2D(row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l); } } } // //CPU 再帰版 backTrack void backTrack1(int row,int left,int down,int right,local *l) { int bitmap=0; int bit=0; bitmap=(l->mask&~(left|down|right)); if(row==G.sizeE){ if(bitmap){ l->COUNT8[l->BOUND1]++; } }else{ if(row<l->BOUND1) { bitmap&=~2; // bm|=2; bm^=2; (bm&=~2と同等) } while(bitmap){ bitmap^=l->aBoard[row]=bit=(-bitmap&bitmap); backTrack1(row+1,(left|bit)<<1, down|bit,(right|bit)>>1,l); } } } //通常版 CPU 再帰版 backTrack void backTrack1D(int row,int left,int down,int right,local *l) { int bit; int bitmap=l->mask&~(left|down|right); //【枝刈り】1行目角にクイーンがある場合回転対称チェックを省略 if(row==G.sizeE) { if(bitmap){ /* l->aBoard[row]=bitmap; */ l->COUNT8[l->BOUND1]++; } }else{ //【枝刈り】鏡像についても主対角線鏡像のみを判定すればよい // 2行目、2列目を数値とみなし、2行目<2列目という条件を課せばよい if(row<l->BOUND1) { bitmap&=~2; // 
bm|=2; bm^=2; (bm&=~2と同等) } while(bitmap){ bitmap^=l->aBoard[row]=bit=(-bitmap&bitmap); backTrack1D(row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l); } } } //チルドスレッドの実行処理 void *run(void *args) { /** //グローバル構造体 typedef struct { int size; int sizeE; long lTOTAL,lUNIQUE; }GCLASS, *GClass; GCLASS G; */ local *l=(local *)args; /** 最上段のクイーンが「角」にある場合の探索 */ int bit=0; int col=0; if(l->BOUND1>1 && l->BOUND1<G.sizeE) { l->aBoard[0]=bit=(1<<col); int left=bit<<1;int down=bit;int right=bit>>1; if(l->BOUND1<G.sizeE) { col=l->BOUND1;// 角にクイーンを配置 l->aBoard[1]=bit=(1<<col); if(NR==1){//2行目から非再帰 backTrack1_NR(2,(left|bit)<<1,(down|bit),(right|bit)>>1,l);//GPU適用版 //backTrack1D_NR(2,(left|bit)<<1,(down|bit),(right|bit)>>1,l); }else{//2行目から再帰 backTrack1(2,(left|bit)<<1,(down|bit),(right|bit)>>1,l);//GPU適用版 //backTrack1D(2,(left|bit)<<1,(down|bit),(right|bit)>>1,l);//通常版 } } } l->TOPBIT=1<<(G.sizeE); l->ENDBIT=(l->TOPBIT>>l->BOUND1); l->SIDEMASK=l->LASTMASK=(l->TOPBIT|1); /** 最上段行のクイーンが「角以外」にある場合の探索 ユニーク解に対する左右対称解を予め削除するには、 左半分だけにクイーンを配置するようにすればよい */ if(l->BOUND1>0&&l->BOUND2<G.sizeE&&l->BOUND1<l->BOUND2){ for(int i=1; i<l->BOUND1; i++){ l->LASTMASK=l->LASTMASK|l->LASTMASK>>1|l->LASTMASK<<1; } if(l->BOUND1<l->BOUND2){ int col=l->BOUND1; l->aBoard[0]=bit=(1<<col); if(NR==1){//2行目から非再帰 backTrack2_NR(1,bit<<1,bit,bit>>1,l); //GPU適用版 //backTrack2D_NR(1,bit<<1,bit,bit>>1,l);//通常版 }else{//2行目から再帰 backTrack2(1,bit<<1,bit,bit>>1,l); //GPU適用版 //backTrack2D(1,bit<<1,bit,bit>>1,l);//通常版 } } l->ENDBIT>>=G.size; } return 0;//*run()の場合はreturn 0;が必要 } //pthreadによるスレッド生成 void *NQueenThread() { /** //ローカル構造体 typedef struct{ int BOUND1,BOUND2,TOPBIT,ENDBIT,SIDEMASK,LASTMASK; int mask; int aBoard[MAX]; long COUNT2[MAX],COUNT4[MAX],COUNT8[MAX]; }local ; */ local l[MAX];//構造体 local型 /** pthreadのチルドスレッド */ pthread_t pt[G.size]; /** 初期化とチルドスレッドの生成 */ for(int BOUND1=G.sizeE,BOUND2=0;BOUND2<G.sizeE;BOUND1--,BOUND2++){ /** aBoardとカウンターの初期化 */ l[BOUND1].mask=(1<<G.size)-1; l[BOUND1].BOUND1=BOUND1;l[BOUND1].BOUND2=BOUND2;//B1 と B2を初期化 for(int j=0;j<G.size;j++){ l[l->BOUND1].aBoard[j]=j; }// aB[]の初期化 l[BOUND1].COUNT2[BOUND1]=l[BOUND1].COUNT4[BOUND1]= l[BOUND1].COUNT8[BOUND1]=0;//カウンターの初期化 /** チルドスレッドの生成 pthread_createでチルドスレッドを生成します。 BOUND1がインクリメントされ、Nの数だけチルドスレッドが生成されます。 run()がチルドスレッドの実行関数となります。 */ int iFbRet=pthread_create(&pt[BOUND1],NULL,&run,&l[BOUND1]); if(iFbRet>0){ printf("[mainThread] pthread_create #%d: %d\n", l[BOUND1].BOUND1, iFbRet); } } /** チルドスレッドが実行され、全ての処理が完了するまでjoin()により待機します。 */ for(int BOUND1=G.sizeE,BOUND2=0;BOUND2<G.sizeE;BOUND1--,BOUND2++){ pthread_join(pt[BOUND1],NULL); } //スレッド毎のカウンターを合計 for(int BOUND1=G.sizeE,BOUND2=0;BOUND2<G.sizeE;BOUND1--,BOUND2++){ G.lTOTAL+=l[BOUND1].COUNT2[BOUND1]*2+ l[BOUND1].COUNT4[BOUND1]*4+l[BOUND1].COUNT8[BOUND1]*8; G.lUNIQUE+=l[BOUND1].COUNT2[BOUND1]+ l[BOUND1].COUNT4[BOUND1]+l[BOUND1].COUNT8[BOUND1]; } return 0; } // CPU/CPURの並列処理(pthread) void NQueen() { /** メインスレッドの生成 拡張子 CUDA はpthreadをサポートしていませんので実行できません コンパイルが通らないので 以下をコメントアウトします Cディレクトリの 並列処理はC13_N-Queen.c を参考にして下さい。 pthreadを使いたいときはここのコメントアウトを外します //iFbRet = pthread_create(&pth, NULL,&NQueenThread,NULL); */ pthread_t pth; //スレッド変数 int iFbRet; //pthreadを使いたいときはここのコメントアウトを外します //iFbRet = pthread_create(&pth, NULL,&NQueenThread,NULL); // if(iFbRet>0){ printf("[main] pthread_create: %d\n", iFbRet); //エラー出力デバッグ用 } pthread_join(pth,NULL); /* いちいちjoinをする */ } // int main(int argc,char** argv) { /** 実行パラメータの処理 $ nvcc -O3 CUDA13_N-Queen.cu && ./a.out (-c|-r|-g|-s) -c:cpu -r cpu再帰 -g GPU -s SGPU(サマーズ版と思われる) */ bool 
cpu=false,cpur=false,gpu=false,sgpu=false; int argstart=1; if(argc>=2&&argv[1][0]=='-'){ if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;} else if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;} else if(argv[1][1]=='s'||argv[1][1]=='S'){sgpu=true;} else{ gpu=true; } //デフォルトをgpuとする argstart=2; } if(argc<argstart){ printf("Usage: %s [-c|-g|-r|-s] n steps\n",argv[0]); printf(" -r: CPUR only\n"); printf(" -c: CPU only\n"); printf(" -g: GPU only\n"); printf(" -s: SGPU only\n"); printf("Default to 8 queen\n"); } /** 出力 $ nvcc コマンドではpthreadは動きません cpu/cpurを実行したい場合はファイル名の拡張子をcに変更した上で、 以下の行頭のコメントを外してください。 # //iFbRet = pthread_create(&pth, NULL,&NQueenThread,NULL); # 1.ソースファイルを複写し、 複写したソースファイルの拡張子を .cu から .cにリネームする CUDA13_N-Queen.cu -> CUDA13_N-Queen.c 2.ソースファイルから以下の行を探し、行頭のコメントを外してください。 //iFbRet = pthread_create(&pth, NULL,&NQueenThread,NULL); 3.以下のコマンドで実行する $ gcc -Wall -W -O3 -g -ftrapv -std=c99 -pthread CUDA13_N-Queen.c && ./a.out [-c|-r] */ if(cpu){ printf("\n\n13.CPU 非再帰 並列処理\n"); printf("pthread\n※nvccではpthreadは動きません!\n"); }else if(cpur){ printf("\n\n13.CPUR 再帰 並列処理\n"); printf("pthread\n※nvccではpthreadは動きません!\n"); }else if(gpu){ printf("\n\n13.GPU 非再帰 並列処理 CUDA\n"); }else if(sgpu){ printf("\n\n13.SGPU 非再帰 並列処理 CUDA\n"); } /** CPU(CPUによる非再起処理)とCPUR(CPUによる再帰処理)の実行 */ if(cpu||cpur) { int min=4; int targetN=17;//実行の開始Nと終了Nの指定 /** 処理時刻計測のための変数 */ struct timeval t0; struct timeval t1; printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); for(int i=min;i<=targetN;i++){ /** size/sizeE/lTOTAL/lUNIQUEといった変数は構造体に格納されています。 typedef struct { int size; int sizeE; long lTOTAL,lUNIQUE; }GCLASS, *GClass; GCLASS G; */ G.size=i;G.sizeE=i-1;//size sizeEの初期化 G.lTOTAL=G.lUNIQUE=0;//TOTAL UNIQUEの初期化 // gettimeofday(&t0, NULL);//計測開始 /** CPU/CPURの場合において、 バックトラックをnon-recursive/recursiveのいずれかを選択 再帰:0 非再帰:1 */ if(cpur){ //再帰 //NR=0;NQueenD(); NR=0;NQueen(); } if(cpu){ //非再帰 //NR=1;NQueenD(); NR=1;NQueen(); } // gettimeofday(&t1, NULL);//計測終了 /** 時刻表記の処理 Total Unique dd:hh:mm:ss.ms 15: 2279184 285053 00:00:00:00.33 16: 14772512 1846955 00:00:00:01.59 17: 95815104 11977939 00:00:00:10.92 */ int ss;int ms;int dd; if(t1.tv_usec<t0.tv_usec) { dd=(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; }else { dd=(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if int hh=ss/3600; int mm=(ss-hh*3600)/60; ss%=60; /** 出力 */ printf("%2d:%16ld%17ld%12.2d:%02d:%02d:%02d.%02d\n", i,G.lTOTAL,G.lUNIQUE,dd,hh,mm,ss,ms); } //end for }//end if /** GPU(過去の記録樹立者サマーズGPU版SGPU処理)と GPUR(GPUによる再帰処理)の実行 */ if(gpu||sgpu) { /** 実行時にデバイスがCUDAがサポートされているかを確認 */ if(!InitCUDA()){return 0;} int steps=24576; int min=4;int targetN=21;//実行の開始Nと終了Nの指定 /** 処理時刻計測のための変数 */ struct timeval t0; struct timeval t1; /** 出力 */ printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); /** 実行処理 */ for(int i=min;i<=targetN;i++){ gettimeofday(&t0,NULL); // 計測開始 if(gpu){//本ソースのメイン処理 TOTAL=0; UNIQUE=0; NQueenG(i,steps); } gettimeofday(&t1,NULL); // 計測終了 /** 時刻表記の処理 Total Unique dd:hh:mm:ss.ms 15: 2279184 285053 00:00:00:00.33 16: 14772512 1846955 00:00:00:01.59 17: 95815104 11977939 00:00:00:10.92 */ int ss;int ms;int dd; if (t1.tv_usec<t0.tv_usec) { dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; } else { dd=(int)(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; 
ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if int hh=ss/3600; int mm=(ss-hh*3600)/60; ss%=60; printf("%2d:%13ld%16ld%4.2d:%02d:%02d:%02d.%02d\n", i,TOTAL,UNIQUE,dd,hh,mm,ss,ms); }//end for }//end if return 0; }
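The N-Queens pair above captures everything hipify changes in these sources: each cuda* host call is renamed to its hip* counterpart with an identical argument list (note the reordered pinned-memory names, cudaMallocHost/cudaFreeHost becoming hipHostMalloc/hipHostFree), and each triple-chevron launch becomes a hipLaunchKernelGGL call that takes the kernel, the grid and block sizes as dim3, the dynamic shared-memory byte count, and the stream ahead of the original kernel arguments. The stand-alone sketch below condenses that mapping into a few lines; the scale kernel, its sizes, and the build assumptions are illustrative inventions, not code taken from either file.

// Minimal HIP sketch of the same rewrites (hypothetical kernel and sizes).
// Each "was:" comment shows the CUDA form that hipify would have started from.
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale(int* buf, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) buf[i] *= 2;   // every thread doubles one element
}

int main() {
  const int n = 256, threads = 64;
  int h[n];
  for (int i = 0; i < n; i++) h[i] = i;
  int* d;
  hipMalloc((void**)&d, n * sizeof(int));                    // was: cudaMalloc((void**)&d, ...)
  hipMemcpy(d, h, n * sizeof(int), hipMemcpyHostToDevice);   // was: cudaMemcpy(..., cudaMemcpyHostToDevice)
  // was: scale<<<n / threads, threads>>>(d, n);
  hipLaunchKernelGGL(scale, dim3(n / threads), dim3(threads), 0, 0, d, n);
  hipMemcpy(h, d, n * sizeof(int), hipMemcpyDeviceToHost);   // was: cudaMemcpy(..., cudaMemcpyDeviceToHost)
  hipFree(d);                                                // was: cudaFree(d)
  printf("h[10] = %d\n", h[10]);                             // expect 20
  return 0;
}

On the NVIDIA platform the hip* calls are thin wrappers over the CUDA runtime, so each translated file is expected to behave the same as its original.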
07202f119869fffb7597e42bc1fa4b08e987dea7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>

#define kDataLen 1000

__global__ void child(float *z) {
  z[blockIdx.x * blockDim.x + threadIdx.x] += 1;
}

__global__ void parent(float* x, float* y, float *z) {
  z[blockIdx.x * blockDim.x + threadIdx.x] +=
      y[blockIdx.x * blockDim.x + threadIdx.x] +
      x[blockIdx.x * blockDim.x + threadIdx.x];
  hipLaunchKernelGGL(( child), dim3(1), dim3(kDataLen), 0, 0, z);
}

int main(int argc, char* argv[]) {
  float host_a[kDataLen];
  float host_b[kDataLen];
  float host_c[kDataLen];
  for (int i=0; i < kDataLen; i++) {
    host_a[i] = i;
    host_b[i] = 2*i;
  }

  // Copy input data to device.
  float* device_a;
  float* device_b;
  float* device_c;
  hipMalloc(&device_a, kDataLen * sizeof(float));
  hipMalloc(&device_b, kDataLen * sizeof(float));
  hipMalloc(&device_c, kDataLen * sizeof(float));
  hipMemcpy(device_a, host_a, kDataLen * sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(device_b, host_b, kDataLen * sizeof(float), hipMemcpyHostToDevice);

  // Launch the kernel.
  hipLaunchKernelGGL(( parent), dim3(5), dim3(kDataLen/5), 0, 0, device_a, device_b, device_c);

  // Copy output data to host.
  hipDeviceSynchronize();
  hipMemcpy(host_c, device_c, kDataLen * sizeof(float), hipMemcpyDeviceToHost);

  // Print the results.
  for (int i = 0; i < kDataLen; ++i) {
    std::cout << "y[" << i << "] = " << host_c[i] << "\n";
  }

  hipDeviceReset();
  return 0;
}
07202f119869fffb7597e42bc1fa4b08e987dea7.cu
#include <iostream>

#define kDataLen 1000

__global__ void child(float *z) {
  z[blockIdx.x * blockDim.x + threadIdx.x] += 1;
}

__global__ void parent(float* x, float* y, float *z) {
  z[blockIdx.x * blockDim.x + threadIdx.x] +=
      y[blockIdx.x * blockDim.x + threadIdx.x] +
      x[blockIdx.x * blockDim.x + threadIdx.x];
  child<<<1, kDataLen>>>(z);
}

int main(int argc, char* argv[]) {
  float host_a[kDataLen];
  float host_b[kDataLen];
  float host_c[kDataLen];
  for (int i=0; i < kDataLen; i++) {
    host_a[i] = i;
    host_b[i] = 2*i;
  }

  // Copy input data to device.
  float* device_a;
  float* device_b;
  float* device_c;
  cudaMalloc(&device_a, kDataLen * sizeof(float));
  cudaMalloc(&device_b, kDataLen * sizeof(float));
  cudaMalloc(&device_c, kDataLen * sizeof(float));
  cudaMemcpy(device_a, host_a, kDataLen * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(device_b, host_b, kDataLen * sizeof(float), cudaMemcpyHostToDevice);

  // Launch the kernel.
  parent<<<5, kDataLen/5>>>(device_a, device_b, device_c);

  // Copy output data to host.
  cudaDeviceSynchronize();
  cudaMemcpy(host_c, device_c, kDataLen * sizeof(float), cudaMemcpyDeviceToHost);

  // Print the results.
  for (int i = 0; i < kDataLen; ++i) {
    std::cout << "y[" << i << "] = " << host_c[i] << "\n";
  }

  cudaDeviceReset();
  return 0;
}
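The final pair above is a minimal CUDA dynamic-parallelism example: every thread of the parent kernel launches the child kernel again from device code, and hipify rewrites that device-side triple-chevron launch into hipLaunchKernelGGL exactly as it does for host-side launches. Device-side launches need relocatable device code and the device runtime at link time, so one plausible build line for the CUDA version is the following (the architecture flag and output name are assumptions about the target, not something recorded in the file):

  nvcc -arch=sm_70 -rdc=true 07202f119869fffb7597e42bc1fa4b08e987dea7.cu -lcudadevrt -o parent_child

The HIP version would normally be fed to hipcc unchanged, but whether the device-side hipLaunchKernelGGL call actually compiles and runs there depends on the ROCm toolchain's support for device-side kernel launches, so treat that part as untested.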